author     Chris Dickinson <christopher.s.dickinson@gmail.com>  2015-05-05 13:48:55 -0700
committer  Rod Vagg <rod@vagg.org>  2015-08-04 11:56:09 -0700
commit     d58e780504bdba6c5897c48428fd984c5b5f96fe (patch)
tree       033f1568ae3f9f077aceb843b42eb1ed1739ce0f
parent     21d31c08e7d0b6865e52452750b20b05e6dca443 (diff)
download   android-node-v8-d58e780504bdba6c5897c48428fd984c5b5f96fe.tar.gz
           android-node-v8-d58e780504bdba6c5897c48428fd984c5b5f96fe.tar.bz2
           android-node-v8-d58e780504bdba6c5897c48428fd984c5b5f96fe.zip
deps: update v8 to 4.3.61.21
* @indutny's SealHandleScope patch (484bebc38319fc7c622478037922ad73b2edcbf9)
  has been cherry-picked onto the top of V8 to make it compile.
* There is some test breakage in contextify.
* This was merged at the request of the TC.

PR-URL: https://github.com/iojs/io.js/pull/1632
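As background for the first bullet, here is a minimal C++ sketch of how an
embedder such as io.js typically uses the SealHandleScope API that the
cherry-picked patch keeps building. The function name and event-loop framing
are illustrative assumptions, not code from this commit:

    #include "v8.h"

    // SealHandleScope asserts that no new v8::Local handles are allocated
    // while it is on the stack; embedders use it to catch handle leaks.
    void TickEventLoop(v8::Isolate* isolate) {  // hypothetical helper
      v8::SealHandleScope seal(isolate);
      // Callbacks dispatched here must open their own v8::HandleScope
      // before creating handles, or V8 reports a fatal error.
    }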
-rw-r--r--  deps/v8/.gitignore | 1
-rw-r--r--  deps/v8/AUTHORS | 1
-rw-r--r--  deps/v8/BUILD.gn | 116
-rw-r--r--  deps/v8/ChangeLog | 446
-rw-r--r--  deps/v8/DEPS | 19
-rw-r--r--  deps/v8/Makefile | 3
-rw-r--r--  deps/v8/Makefile.android | 8
-rw-r--r--  deps/v8/PRESUBMIT.py | 1
-rw-r--r--  deps/v8/README.md | 6
-rw-r--r--  deps/v8/build/android.gypi | 18
-rw-r--r--  deps/v8/build/detect_v8_host_arch.py | 8
-rw-r--r--  deps/v8/build/features.gypi | 4
-rwxr-xr-x  deps/v8/build/get_landmines.py | 1
-rw-r--r--  deps/v8/build/gyp_environment.py | 52
-rwxr-xr-x  deps/v8/build/gyp_v8 | 41
-rw-r--r--  deps/v8/build/landmine_utils.py | 9
-rwxr-xr-x  deps/v8/build/landmines.py | 178
-rw-r--r--  deps/v8/build/standalone.gypi | 89
-rw-r--r--  deps/v8/build/toolchain.gypi | 13
-rw-r--r--  deps/v8/include/v8-debug.h | 15
-rw-r--r--  deps/v8/include/v8-profiler.h | 33
-rw-r--r--  deps/v8/include/v8-util.h | 138
-rw-r--r--  deps/v8/include/v8-version.h | 4
-rw-r--r--  deps/v8/include/v8.h | 1162
-rw-r--r--  deps/v8/include/v8config.h | 17
-rw-r--r--  deps/v8/src/DEPS | 2
-rw-r--r--  deps/v8/src/accessors.cc | 97
-rw-r--r--  deps/v8/src/api-natives.cc | 6
-rw-r--r--  deps/v8/src/api.cc | 3154
-rw-r--r--  deps/v8/src/api.h | 12
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 39
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 26
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 11
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 226
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 392
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc | 13
-rw-r--r--  deps/v8/src/arm/debug-arm.cc | 42
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 2
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 3
-rw-r--r--  deps/v8/src/arm/frames-arm.h | 5
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 215
-rw-r--r--  deps/v8/src/arm/interface-descriptors-arm.cc | 6
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 18
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 24
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 145
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 139
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 24
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 3
-rw-r--r--  deps/v8/src/arm64/assembler-arm64-inl.h | 46
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.cc | 84
-rw-r--r--  deps/v8/src/arm64/assembler-arm64.h | 23
-rw-r--r--  deps/v8/src/arm64/builtins-arm64.cc | 237
-rw-r--r--  deps/v8/src/arm64/code-stubs-arm64.cc | 395
-rw-r--r--  deps/v8/src/arm64/debug-arm64.cc | 41
-rw-r--r--  deps/v8/src/arm64/deoptimizer-arm64.cc | 2
-rw-r--r--  deps/v8/src/arm64/frames-arm64.h | 5
-rw-r--r--  deps/v8/src/arm64/full-codegen-arm64.cc | 226
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.cc | 22
-rw-r--r--  deps/v8/src/arm64/instructions-arm64.h | 41
-rw-r--r--  deps/v8/src/arm64/interface-descriptors-arm64.cc | 9
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.cc | 20
-rw-r--r--  deps/v8/src/arm64/lithium-arm64.h | 26
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.cc | 201
-rw-r--r--  deps/v8/src/arm64/lithium-codegen-arm64.h | 21
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.cc | 155
-rw-r--r--  deps/v8/src/arm64/macro-assembler-arm64.h | 37
-rw-r--r--  deps/v8/src/array.js | 2
-rw-r--r--  deps/v8/src/arraybuffer.js | 8
-rw-r--r--  deps/v8/src/assembler.cc | 62
-rw-r--r--  deps/v8/src/assembler.h | 31
-rw-r--r--  deps/v8/src/ast-numbering.cc | 12
-rw-r--r--  deps/v8/src/ast-value-factory.h | 66
-rw-r--r--  deps/v8/src/ast.cc | 95
-rw-r--r--  deps/v8/src/ast.h | 172
-rw-r--r--  deps/v8/src/background-parsing-task.cc | 45
-rw-r--r--  deps/v8/src/background-parsing-task.h | 13
-rw-r--r--  deps/v8/src/bailout-reason.h | 15
-rw-r--r--  deps/v8/src/base/bits.h | 13
-rw-r--r--  deps/v8/src/base/cpu.cc | 2
-rw-r--r--  deps/v8/src/base/logging.cc | 25
-rw-r--r--  deps/v8/src/base/platform/platform-freebsd.cc | 6
-rw-r--r--  deps/v8/src/base/platform/platform-posix.cc | 5
-rw-r--r--  deps/v8/src/base/platform/platform-win32.cc | 5
-rw-r--r--  deps/v8/src/base/platform/platform.h | 2
-rw-r--r--  deps/v8/src/bootstrapper.cc | 268
-rw-r--r--  deps/v8/src/builtins.cc | 13
-rw-r--r--  deps/v8/src/builtins.h | 6
-rw-r--r--  deps/v8/src/char-predicates-inl.h | 7
-rw-r--r--  deps/v8/src/char-predicates.h | 2
-rw-r--r--  deps/v8/src/code-factory.cc | 56
-rw-r--r--  deps/v8/src/code-factory.h | 11
-rw-r--r--  deps/v8/src/code-stubs-hydrogen.cc | 237
-rw-r--r--  deps/v8/src/code-stubs.cc | 24
-rw-r--r--  deps/v8/src/code-stubs.h | 66
-rw-r--r--  deps/v8/src/codegen.cc | 38
-rw-r--r--  deps/v8/src/collection.js | 41
-rw-r--r--  deps/v8/src/compilation-cache.cc | 1
-rw-r--r--  deps/v8/src/compiler.cc | 579
-rw-r--r--  deps/v8/src/compiler.h | 390
-rw-r--r--  deps/v8/src/compiler/access-builder.cc | 24
-rw-r--r--  deps/v8/src/compiler/access-builder.h | 8
-rw-r--r--  deps/v8/src/compiler/all-nodes.cc | 29
-rw-r--r--  deps/v8/src/compiler/all-nodes.h | 21
-rw-r--r--  deps/v8/src/compiler/arm/code-generator-arm.cc | 83
-rw-r--r--  deps/v8/src/compiler/arm/instruction-codes-arm.h | 14
-rw-r--r--  deps/v8/src/compiler/arm/instruction-selector-arm.cc | 179
-rw-r--r--  deps/v8/src/compiler/arm/linkage-arm.cc | 5
-rw-r--r--  deps/v8/src/compiler/arm64/code-generator-arm64.cc | 130
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-codes-arm64.h | 32
-rw-r--r--  deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 256
-rw-r--r--  deps/v8/src/compiler/arm64/linkage-arm64.cc | 5
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.cc | 644
-rw-r--r--  deps/v8/src/compiler/ast-graph-builder.h | 148
-rw-r--r--  deps/v8/src/compiler/ast-loop-assignment-analyzer.cc | 1
-rw-r--r--  deps/v8/src/compiler/basic-block-instrumentor.cc | 3
-rw-r--r--  deps/v8/src/compiler/change-lowering.cc | 221
-rw-r--r--  deps/v8/src/compiler/change-lowering.h | 6
-rw-r--r--  deps/v8/src/compiler/code-generator-impl.h | 32
-rw-r--r--  deps/v8/src/compiler/code-generator.cc | 70
-rw-r--r--  deps/v8/src/compiler/code-generator.h | 26
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.cc | 124
-rw-r--r--  deps/v8/src/compiler/common-operator-reducer.h | 21
-rw-r--r--  deps/v8/src/compiler/common-operator.cc | 166
-rw-r--r--  deps/v8/src/compiler/common-operator.h | 8
-rw-r--r--  deps/v8/src/compiler/control-builders.cc | 12
-rw-r--r--  deps/v8/src/compiler/control-builders.h | 11
-rw-r--r--  deps/v8/src/compiler/control-equivalence.h | 40
-rw-r--r--  deps/v8/src/compiler/control-flow-optimizer.cc | 189
-rw-r--r--  deps/v8/src/compiler/control-flow-optimizer.h | 3
-rw-r--r--  deps/v8/src/compiler/control-reducer.cc | 160
-rw-r--r--  deps/v8/src/compiler/generic-algorithm.h | 120
-rw-r--r--  deps/v8/src/compiler/graph-inl.h | 25
-rw-r--r--  deps/v8/src/compiler/graph-visualizer.cc | 36
-rw-r--r--  deps/v8/src/compiler/ia32/code-generator-ia32.cc | 128
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-codes-ia32.h | 17
-rw-r--r--  deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 223
-rw-r--r--  deps/v8/src/compiler/ia32/linkage-ia32.cc | 5
-rw-r--r--  deps/v8/src/compiler/instruction-codes.h | 1
-rw-r--r--  deps/v8/src/compiler/instruction-selector-impl.h | 20
-rw-r--r--  deps/v8/src/compiler/instruction-selector.cc | 393
-rw-r--r--  deps/v8/src/compiler/instruction-selector.h | 20
-rw-r--r--  deps/v8/src/compiler/instruction.cc | 94
-rw-r--r--  deps/v8/src/compiler/instruction.h | 217
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.cc | 66
-rw-r--r--  deps/v8/src/compiler/js-builtin-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.cc | 101
-rw-r--r--  deps/v8/src/compiler/js-generic-lowering.h | 22
-rw-r--r--  deps/v8/src/compiler/js-graph.cc | 2
-rw-r--r--  deps/v8/src/compiler/js-inlining.cc | 223
-rw-r--r--  deps/v8/src/compiler/js-inlining.h | 20
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.cc | 241
-rw-r--r--  deps/v8/src/compiler/js-intrinsic-lowering.h | 23
-rw-r--r--  deps/v8/src/compiler/js-operator.cc | 47
-rw-r--r--  deps/v8/src/compiler/js-operator.h | 3
-rw-r--r--  deps/v8/src/compiler/js-type-feedback.cc | 256
-rw-r--r--  deps/v8/src/compiler/js-type-feedback.h | 91
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.cc | 387
-rw-r--r--  deps/v8/src/compiler/js-typed-lowering.h | 2
-rw-r--r--  deps/v8/src/compiler/jump-threading.cc | 42
-rw-r--r--  deps/v8/src/compiler/jump-threading.h | 5
-rw-r--r--  deps/v8/src/compiler/linkage-impl.h | 50
-rw-r--r--  deps/v8/src/compiler/linkage.cc | 25
-rw-r--r--  deps/v8/src/compiler/linkage.h | 5
-rw-r--r--  deps/v8/src/compiler/liveness-analyzer.cc | 200
-rw-r--r--  deps/v8/src/compiler/liveness-analyzer.h | 146
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.cc | 117
-rw-r--r--  deps/v8/src/compiler/machine-operator-reducer.h | 3
-rw-r--r--  deps/v8/src/compiler/machine-operator.cc | 10
-rw-r--r--  deps/v8/src/compiler/machine-operator.h | 42
-rw-r--r--  deps/v8/src/compiler/mips/code-generator-mips.cc | 66
-rw-r--r--  deps/v8/src/compiler/mips/instruction-codes-mips.h | 9
-rw-r--r--  deps/v8/src/compiler/mips/instruction-selector-mips.cc | 154
-rw-r--r--  deps/v8/src/compiler/mips/linkage-mips.cc | 5
-rw-r--r--  deps/v8/src/compiler/mips64/code-generator-mips64.cc | 162
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 139
-rw-r--r--  deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 204
-rw-r--r--  deps/v8/src/compiler/mips64/linkage-mips64.cc | 5
-rw-r--r--  deps/v8/src/compiler/move-optimizer.cc | 135
-rw-r--r--  deps/v8/src/compiler/move-optimizer.h | 2
-rw-r--r--  deps/v8/src/compiler/node-matchers.cc | 17
-rw-r--r--  deps/v8/src/compiler/node-matchers.h | 30
-rw-r--r--  deps/v8/src/compiler/node-properties.cc | 64
-rw-r--r--  deps/v8/src/compiler/node-properties.h | 21
-rw-r--r--  deps/v8/src/compiler/node.cc | 62
-rw-r--r--  deps/v8/src/compiler/node.h | 5
-rw-r--r--  deps/v8/src/compiler/opcodes.h | 248
-rw-r--r--  deps/v8/src/compiler/operator-properties.cc | 56
-rw-r--r--  deps/v8/src/compiler/operator-properties.h | 7
-rw-r--r--  deps/v8/src/compiler/operator.h | 8
-rw-r--r--  deps/v8/src/compiler/osr.cc | 154
-rw-r--r--  deps/v8/src/compiler/pipeline.cc | 105
-rw-r--r--  deps/v8/src/compiler/ppc/OWNERS | 3
-rw-r--r--  deps/v8/src/compiler/ppc/code-generator-ppc.cc | 166
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-codes-ppc.h | 8
-rw-r--r--  deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 148
-rw-r--r--  deps/v8/src/compiler/ppc/linkage-ppc.cc | 5
-rw-r--r--  deps/v8/src/compiler/raw-machine-assembler.h | 33
-rw-r--r--  deps/v8/src/compiler/register-allocator-verifier.cc | 23
-rw-r--r--  deps/v8/src/compiler/register-allocator-verifier.h | 2
-rw-r--r--  deps/v8/src/compiler/register-allocator.cc | 359
-rw-r--r--  deps/v8/src/compiler/register-allocator.h | 38
-rw-r--r--  deps/v8/src/compiler/schedule.cc | 48
-rw-r--r--  deps/v8/src/compiler/schedule.h | 53
-rw-r--r--  deps/v8/src/compiler/scheduler.cc | 284
-rw-r--r--  deps/v8/src/compiler/simplified-lowering.cc | 305
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.cc | 35
-rw-r--r--  deps/v8/src/compiler/simplified-operator-reducer.h | 4
-rw-r--r--  deps/v8/src/compiler/simplified-operator.cc | 1
-rw-r--r--  deps/v8/src/compiler/simplified-operator.h | 2
-rw-r--r--  deps/v8/src/compiler/state-values-utils.cc | 317
-rw-r--r--  deps/v8/src/compiler/state-values-utils.h | 120
-rw-r--r--  deps/v8/src/compiler/typer.cc | 195
-rw-r--r--  deps/v8/src/compiler/typer.h | 3
-rw-r--r--  deps/v8/src/compiler/verifier.cc | 72
-rw-r--r--  deps/v8/src/compiler/x64/code-generator-x64.cc | 157
-rw-r--r--  deps/v8/src/compiler/x64/instruction-codes-x64.h | 17
-rw-r--r--  deps/v8/src/compiler/x64/instruction-selector-x64.cc | 291
-rw-r--r--  deps/v8/src/compiler/x64/linkage-x64.cc | 5
-rw-r--r--  deps/v8/src/contexts.cc | 25
-rw-r--r--  deps/v8/src/contexts.h | 29
-rw-r--r--  deps/v8/src/conversions.cc | 51
-rw-r--r--  deps/v8/src/conversions.h | 2
-rw-r--r--  deps/v8/src/counters.h | 69
-rw-r--r--  deps/v8/src/cpu-profiler-inl.h | 10
-rw-r--r--  deps/v8/src/cpu-profiler.cc | 40
-rw-r--r--  deps/v8/src/cpu-profiler.h | 20
-rw-r--r--  deps/v8/src/d8.cc | 36
-rw-r--r--  deps/v8/src/date.js | 161
-rw-r--r--  deps/v8/src/debug-debugger.js | 25
-rw-r--r--  deps/v8/src/debug.cc | 603
-rw-r--r--  deps/v8/src/debug.h | 219
-rw-r--r--  deps/v8/src/deoptimizer.cc | 34
-rw-r--r--  deps/v8/src/deoptimizer.h | 34
-rw-r--r--  deps/v8/src/disassembler.cc | 28
-rw-r--r--  deps/v8/src/elements.cc | 4
-rw-r--r--  deps/v8/src/execution.cc | 23
-rw-r--r--  deps/v8/src/execution.h | 4
-rw-r--r--  deps/v8/src/extensions/statistics-extension.cc | 5
-rw-r--r--  deps/v8/src/factory.cc | 163
-rw-r--r--  deps/v8/src/factory.h | 62
-rw-r--r--  deps/v8/src/flag-definitions.h | 103
-rw-r--r--  deps/v8/src/flags.cc | 23
-rw-r--r--  deps/v8/src/flags.h | 3
-rw-r--r--  deps/v8/src/frames-inl.h | 52
-rw-r--r--  deps/v8/src/frames.cc | 171
-rw-r--r--  deps/v8/src/frames.h | 84
-rw-r--r--  deps/v8/src/full-codegen.cc | 128
-rw-r--r--  deps/v8/src/full-codegen.h | 121
-rw-r--r--  deps/v8/src/gdb-jit.cc | 350
-rw-r--r--  deps/v8/src/gdb-jit.h | 61
-rw-r--r--  deps/v8/src/global-handles.cc | 141
-rw-r--r--  deps/v8/src/global-handles.h | 29
-rw-r--r--  deps/v8/src/globals.h | 73
-rw-r--r--  deps/v8/src/harmony-array.js | 2
-rw-r--r--  deps/v8/src/harmony-reflect.js | 18
-rw-r--r--  deps/v8/src/harmony-string.js | 194
-rw-r--r--  deps/v8/src/harmony-tostring.js | 3
-rw-r--r--  deps/v8/src/heap-profiler.cc | 17
-rw-r--r--  deps/v8/src/heap-profiler.h | 9
-rw-r--r--  deps/v8/src/heap-snapshot-generator.cc | 124
-rw-r--r--  deps/v8/src/heap-snapshot-generator.h | 31
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.cc | 63
-rw-r--r--  deps/v8/src/heap/gc-idle-time-handler.h | 30
-rw-r--r--  deps/v8/src/heap/heap-inl.h | 23
-rw-r--r--  deps/v8/src/heap/heap.cc | 564
-rw-r--r--  deps/v8/src/heap/heap.h | 104
-rw-r--r--  deps/v8/src/heap/incremental-marking.cc | 82
-rw-r--r--  deps/v8/src/heap/incremental-marking.h | 18
-rw-r--r--  deps/v8/src/heap/mark-compact.cc | 607
-rw-r--r--  deps/v8/src/heap/mark-compact.h | 38
-rw-r--r--  deps/v8/src/heap/objects-visiting-inl.h | 31
-rw-r--r--  deps/v8/src/heap/objects-visiting.cc | 82
-rw-r--r--  deps/v8/src/heap/objects-visiting.h | 6
-rw-r--r--  deps/v8/src/heap/spaces.cc | 47
-rw-r--r--  deps/v8/src/heap/spaces.h | 32
-rw-r--r--  deps/v8/src/heap/store-buffer-inl.h | 8
-rw-r--r--  deps/v8/src/heap/store-buffer.cc | 192
-rw-r--r--  deps/v8/src/heap/store-buffer.h | 37
-rw-r--r--  deps/v8/src/hydrogen-bce.cc | 2
-rw-r--r--  deps/v8/src/hydrogen-gvn.cc | 41
-rw-r--r--  deps/v8/src/hydrogen-gvn.h | 4
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 66
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 183
-rw-r--r--  deps/v8/src/hydrogen-representation-changes.cc | 2
-rw-r--r--  deps/v8/src/hydrogen.cc | 537
-rw-r--r--  deps/v8/src/hydrogen.h | 332
-rw-r--r--  deps/v8/src/i18n.cc | 24
-rw-r--r--  deps/v8/src/i18n.js | 141
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 29
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 47
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 44
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 266
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 371
-rw-r--r--  deps/v8/src/ia32/debug-ia32.cc | 73
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 2
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 30
-rw-r--r--  deps/v8/src/ia32/frames-ia32.h | 5
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 212
-rw-r--r--  deps/v8/src/ia32/interface-descriptors-ia32.cc | 6
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 148
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 15
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 22
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 191
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 32
-rw-r--r--  deps/v8/src/ic/arm/handler-compiler-arm.cc | 45
-rw-r--r--  deps/v8/src/ic/arm64/handler-compiler-arm64.cc | 46
-rw-r--r--  deps/v8/src/ic/handler-compiler.cc | 33
-rw-r--r--  deps/v8/src/ic/ia32/handler-compiler-ia32.cc | 39
-rw-r--r--  deps/v8/src/ic/ic-compiler.cc | 11
-rw-r--r--  deps/v8/src/ic/ic-compiler.h | 1
-rw-r--r--  deps/v8/src/ic/ic-state.cc | 38
-rw-r--r--  deps/v8/src/ic/ic-state.h | 1
-rw-r--r--  deps/v8/src/ic/ic.cc | 187
-rw-r--r--  deps/v8/src/ic/ic.h | 19
-rw-r--r--  deps/v8/src/ic/mips/handler-compiler-mips.cc | 45
-rw-r--r--  deps/v8/src/ic/mips64/handler-compiler-mips64.cc | 45
-rw-r--r--  deps/v8/src/ic/mips64/ic-mips64.cc | 4
-rw-r--r--  deps/v8/src/ic/ppc/OWNERS | 3
-rw-r--r--  deps/v8/src/ic/ppc/handler-compiler-ppc.cc | 81
-rw-r--r--  deps/v8/src/ic/ppc/ic-compiler-ppc.cc | 16
-rw-r--r--  deps/v8/src/ic/stub-cache.cc | 4
-rw-r--r--  deps/v8/src/ic/x64/handler-compiler-x64.cc | 40
-rw-r--r--  deps/v8/src/ic/x87/handler-compiler-x87.cc | 39
-rw-r--r--  deps/v8/src/interface-descriptors.h | 7
-rw-r--r--  deps/v8/src/isolate.cc | 786
-rw-r--r--  deps/v8/src/isolate.h | 162
-rw-r--r--  deps/v8/src/json-parser.h | 73
-rw-r--r--  deps/v8/src/json-stringifier.h | 5
-rw-r--r--  deps/v8/src/json.js | 2
-rw-r--r--  deps/v8/src/jsregexp.cc | 6
-rw-r--r--  deps/v8/src/layout-descriptor-inl.h | 84
-rw-r--r--  deps/v8/src/layout-descriptor.cc | 93
-rw-r--r--  deps/v8/src/layout-descriptor.h | 24
-rw-r--r--  deps/v8/src/lithium-codegen.cc | 11
-rw-r--r--  deps/v8/src/lithium-codegen.h | 2
-rw-r--r--  deps/v8/src/lithium.cc | 5
-rw-r--r--  deps/v8/src/liveedit.cc | 18
-rw-r--r--  deps/v8/src/log.cc | 47
-rw-r--r--  deps/v8/src/log.h | 6
-rw-r--r--  deps/v8/src/lookup-inl.h | 41
-rw-r--r--  deps/v8/src/lookup.cc | 112
-rw-r--r--  deps/v8/src/lookup.h | 54
-rw-r--r--  deps/v8/src/macros.py | 6
-rw-r--r--  deps/v8/src/math.js | 250
-rw-r--r--  deps/v8/src/messages.js | 110
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 87
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 40
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 18
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 229
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 400
-rw-r--r--  deps/v8/src/mips/debug-mips.cc | 42
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 2
-rw-r--r--  deps/v8/src/mips/frames-mips.h | 5
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 213
-rw-r--r--  deps/v8/src/mips/interface-descriptors-mips.cc | 6
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 144
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 18
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 24
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 158
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 30
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 4
-rw-r--r--  deps/v8/src/mips64/assembler-mips64-inl.h | 94
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.cc | 42
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.h | 31
-rw-r--r--  deps/v8/src/mips64/builtins-mips64.cc | 229
-rw-r--r--  deps/v8/src/mips64/code-stubs-mips64.cc | 402
-rw-r--r--  deps/v8/src/mips64/debug-mips64.cc | 42
-rw-r--r--  deps/v8/src/mips64/deoptimizer-mips64.cc | 75
-rw-r--r--  deps/v8/src/mips64/frames-mips64.h | 5
-rw-r--r--  deps/v8/src/mips64/full-codegen-mips64.cc | 211
-rw-r--r--  deps/v8/src/mips64/interface-descriptors-mips64.cc | 6
-rw-r--r--  deps/v8/src/mips64/lithium-codegen-mips64.cc | 154
-rw-r--r--  deps/v8/src/mips64/lithium-mips64.cc | 18
-rw-r--r--  deps/v8/src/mips64/lithium-mips64.h | 24
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.cc | 242
-rw-r--r--  deps/v8/src/mips64/macro-assembler-mips64.h | 35
-rw-r--r--  deps/v8/src/mips64/simulator-mips64.cc | 18
-rw-r--r--  deps/v8/src/mirror-debugger.js | 2
-rw-r--r--  deps/v8/src/modules.cc | 33
-rw-r--r--  deps/v8/src/modules.h | 12
-rw-r--r--  deps/v8/src/object-observe.js | 2
-rw-r--r--  deps/v8/src/objects-debug.cc | 35
-rw-r--r--  deps/v8/src/objects-inl.h | 375
-rw-r--r--  deps/v8/src/objects-printer.cc | 38
-rw-r--r--  deps/v8/src/objects.cc | 1700
-rw-r--r--  deps/v8/src/objects.h | 460
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.cc | 109
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.h | 16
-rw-r--r--  deps/v8/src/parser.cc | 850
-rw-r--r--  deps/v8/src/parser.h | 261
-rw-r--r--  deps/v8/src/pending-compilation-error-handler.cc | 64
-rw-r--r--  deps/v8/src/pending-compilation-error-handler.h | 91
-rw-r--r--  deps/v8/src/ppc/OWNERS | 3
-rw-r--r--  deps/v8/src/ppc/assembler-ppc-inl.h | 172
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.cc | 747
-rw-r--r--  deps/v8/src/ppc/assembler-ppc.h | 220
-rw-r--r--  deps/v8/src/ppc/builtins-ppc.cc | 298
-rw-r--r--  deps/v8/src/ppc/code-stubs-ppc.cc | 538
-rw-r--r--  deps/v8/src/ppc/codegen-ppc.cc | 6
-rw-r--r--  deps/v8/src/ppc/debug-ppc.cc | 47
-rw-r--r--  deps/v8/src/ppc/deoptimizer-ppc.cc | 10
-rw-r--r--  deps/v8/src/ppc/disasm-ppc.cc | 9
-rw-r--r--  deps/v8/src/ppc/frames-ppc.cc | 16
-rw-r--r--  deps/v8/src/ppc/frames-ppc.h | 15
-rw-r--r--  deps/v8/src/ppc/full-codegen-ppc.cc | 342
-rw-r--r--  deps/v8/src/ppc/interface-descriptors-ppc.cc | 6
-rw-r--r--  deps/v8/src/ppc/lithium-codegen-ppc.cc | 193
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.cc | 23
-rw-r--r--  deps/v8/src/ppc/lithium-ppc.h | 32
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.cc | 354
-rw-r--r--  deps/v8/src/ppc/macro-assembler-ppc.h | 45
-rw-r--r--  deps/v8/src/ppc/simulator-ppc.cc | 22
-rw-r--r--  deps/v8/src/preparse-data-format.h | 2
-rw-r--r--  deps/v8/src/preparse-data.cc | 11
-rw-r--r--  deps/v8/src/preparse-data.h | 63
-rw-r--r--  deps/v8/src/preparser.cc | 62
-rw-r--r--  deps/v8/src/preparser.h | 219
-rw-r--r--  deps/v8/src/prettyprinter.cc | 5
-rw-r--r--  deps/v8/src/profile-generator-inl.h | 9
-rw-r--r--  deps/v8/src/profile-generator.cc | 236
-rw-r--r--  deps/v8/src/profile-generator.h | 118
-rw-r--r--  deps/v8/src/promise.js | 61
-rw-r--r--  deps/v8/src/property-details.h | 53
-rw-r--r--  deps/v8/src/property.h | 10
-rw-r--r--  deps/v8/src/regexp.js | 317
-rw-r--r--  deps/v8/src/rewriter.cc | 7
-rw-r--r--  deps/v8/src/rewriter.h | 4
-rw-r--r--  deps/v8/src/runtime.js | 102
-rw-r--r--  deps/v8/src/runtime/runtime-array.cc | 32
-rw-r--r--  deps/v8/src/runtime/runtime-classes.cc | 50
-rw-r--r--  deps/v8/src/runtime/runtime-compiler.cc | 1
-rw-r--r--  deps/v8/src/runtime/runtime-date.cc | 3
-rw-r--r--  deps/v8/src/runtime/runtime-debug.cc | 202
-rw-r--r--  deps/v8/src/runtime/runtime-function.cc | 11
-rw-r--r--  deps/v8/src/runtime/runtime-generator.cc | 37
-rw-r--r--  deps/v8/src/runtime/runtime-internal.cc | 55
-rw-r--r--  deps/v8/src/runtime/runtime-literals.cc | 12
-rw-r--r--  deps/v8/src/runtime/runtime-liveedit.cc | 5
-rw-r--r--  deps/v8/src/runtime/runtime-maths.cc | 32
-rw-r--r--  deps/v8/src/runtime/runtime-numbers.cc | 6
-rw-r--r--  deps/v8/src/runtime/runtime-object.cc | 108
-rw-r--r--  deps/v8/src/runtime/runtime-proxy.cc | 2
-rw-r--r--  deps/v8/src/runtime/runtime-regexp.cc | 35
-rw-r--r--  deps/v8/src/runtime/runtime-scopes.cc | 55
-rw-r--r--  deps/v8/src/runtime/runtime-strings.cc | 100
-rw-r--r--  deps/v8/src/runtime/runtime-test.cc | 61
-rw-r--r--  deps/v8/src/runtime/runtime-typedarray.cc | 52
-rw-r--r--  deps/v8/src/runtime/runtime.cc | 42
-rw-r--r--  deps/v8/src/runtime/runtime.h | 466
-rw-r--r--  deps/v8/src/scanner.cc | 253
-rw-r--r--  deps/v8/src/scanner.h | 35
-rw-r--r--  deps/v8/src/scopeinfo.cc | 95
-rw-r--r--  deps/v8/src/scopes.cc | 185
-rw-r--r--  deps/v8/src/scopes.h | 73
-rw-r--r--  deps/v8/src/snapshot/DEPS | 5
-rw-r--r--  deps/v8/src/snapshot/mksnapshot.cc (renamed from deps/v8/src/mksnapshot.cc) | 21
-rw-r--r--  deps/v8/src/snapshot/natives-external.cc (renamed from deps/v8/src/natives-external.cc) | 45
-rw-r--r--  deps/v8/src/snapshot/natives.h (renamed from deps/v8/src/natives.h) | 2
-rw-r--r--  deps/v8/src/snapshot/serialize.cc (renamed from deps/v8/src/serialize.cc) | 948
-rw-r--r--  deps/v8/src/snapshot/serialize.h (renamed from deps/v8/src/serialize.h) | 376
-rw-r--r--  deps/v8/src/snapshot/snapshot-common.cc (renamed from deps/v8/src/snapshot-common.cc) | 63
-rw-r--r--  deps/v8/src/snapshot/snapshot-empty.cc (renamed from deps/v8/src/snapshot-empty.cc) | 8
-rw-r--r--  deps/v8/src/snapshot/snapshot-external.cc (renamed from deps/v8/src/snapshot-external.cc) | 10
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.cc (renamed from deps/v8/src/snapshot-source-sink.cc) | 16
-rw-r--r--  deps/v8/src/snapshot/snapshot-source-sink.h (renamed from deps/v8/src/snapshot-source-sink.h) | 12
-rw-r--r--  deps/v8/src/snapshot/snapshot.h (renamed from deps/v8/src/snapshot.h) | 14
-rw-r--r--  deps/v8/src/string-iterator.js | 52
-rw-r--r--  deps/v8/src/string.js | 387
-rw-r--r--  deps/v8/src/strings-storage.cc | 123
-rw-r--r--  deps/v8/src/strings-storage.h | 47
-rw-r--r--  deps/v8/src/symbol.js | 100
-rw-r--r--  deps/v8/src/templates.js (renamed from deps/v8/src/harmony-templates.js) | 34
-rw-r--r--  deps/v8/src/third_party/fdlibm/fdlibm.js | 90
-rw-r--r--  deps/v8/src/token.h | 3
-rw-r--r--  deps/v8/src/transitions-inl.h | 117
-rw-r--r--  deps/v8/src/transitions.cc | 523
-rw-r--r--  deps/v8/src/transitions.h | 298
-rw-r--r--  deps/v8/src/type-feedback-vector.cc | 224
-rw-r--r--  deps/v8/src/type-feedback-vector.h | 83
-rw-r--r--  deps/v8/src/type-info.cc | 73
-rw-r--r--  deps/v8/src/type-info.h | 24
-rw-r--r--  deps/v8/src/typedarray.js | 8
-rw-r--r--  deps/v8/src/types.cc | 17
-rw-r--r--  deps/v8/src/types.h | 3
-rw-r--r--  deps/v8/src/typing.cc | 22
-rw-r--r--  deps/v8/src/unique.h | 7
-rw-r--r--  deps/v8/src/utils.h | 2
-rw-r--r--  deps/v8/src/v8.cc | 6
-rw-r--r--  deps/v8/src/v8natives.js | 73
-rw-r--r--  deps/v8/src/variables.cc | 8
-rw-r--r--  deps/v8/src/variables.h | 36
-rw-r--r--  deps/v8/src/weak-collection.js | 67
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 29
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 171
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 53
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 242
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 364
-rw-r--r--  deps/v8/src/x64/debug-x64.cc | 69
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 3
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 35
-rw-r--r--  deps/v8/src/x64/frames-x64.h | 9
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 203
-rw-r--r--  deps/v8/src/x64/interface-descriptors-x64.cc | 6
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 154
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 18
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 24
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 392
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 50
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 8
-rw-r--r--  deps/v8/src/x87/assembler-x87-inl.h | 29
-rw-r--r--  deps/v8/src/x87/assembler-x87.cc | 14
-rw-r--r--  deps/v8/src/x87/assembler-x87.h | 9
-rw-r--r--  deps/v8/src/x87/builtins-x87.cc | 266
-rw-r--r--  deps/v8/src/x87/code-stubs-x87.cc | 381
-rw-r--r--  deps/v8/src/x87/debug-x87.cc | 72
-rw-r--r--  deps/v8/src/x87/deoptimizer-x87.cc | 2
-rw-r--r--  deps/v8/src/x87/frames-x87.h | 5
-rw-r--r--  deps/v8/src/x87/full-codegen-x87.cc | 211
-rw-r--r--  deps/v8/src/x87/interface-descriptors-x87.cc | 6
-rw-r--r--  deps/v8/src/x87/lithium-codegen-x87.cc | 151
-rw-r--r--  deps/v8/src/x87/lithium-x87.cc | 15
-rw-r--r--  deps/v8/src/x87/lithium-x87.h | 22
-rw-r--r--  deps/v8/src/x87/macro-assembler-x87.cc | 156
-rw-r--r--  deps/v8/src/x87/macro-assembler-x87.h | 25
-rw-r--r--  deps/v8/test/cctest/cctest.gyp | 6
-rw-r--r--  deps/v8/test/cctest/cctest.h | 3
-rw-r--r--  deps/v8/test/cctest/cctest.status | 73
-rw-r--r--  deps/v8/test/cctest/compiler/call-tester.h | 15
-rw-r--r--  deps/v8/test/cctest/compiler/function-tester.h | 22
-rw-r--r--  deps/v8/test/cctest/compiler/simplified-graph-builder.cc | 2
-rw-r--r--  deps/v8/test/cctest/compiler/test-codegen-deopt.cc | 28
-rw-r--r--  deps/v8/test/cctest/compiler/test-control-reducer.cc | 53
-rw-r--r--  deps/v8/test/cctest/compiler/test-instruction.cc | 55
-rw-r--r--  deps/v8/test/cctest/compiler/test-js-typed-lowering.cc | 85
-rw-r--r--  deps/v8/test/cctest/compiler/test-jump-threading.cc | 16
-rw-r--r--  deps/v8/test/cctest/compiler/test-linkage.cc | 23
-rw-r--r--  deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc | 11
-rw-r--r--  deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc | 1
-rw-r--r--  deps/v8/test/cctest/compiler/test-node-algorithm.cc | 40
-rw-r--r--  deps/v8/test/cctest/compiler/test-node.cc | 627
-rw-r--r--  deps/v8/test/cctest/compiler/test-osr.cc | 111
-rw-r--r--  deps/v8/test/cctest/compiler/test-pipeline.cc | 8
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-inlining.cc | 99
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-intrinsics.cc | 279
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-jsexceptions.cc | 139
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-jsops.cc | 2
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-machops.cc | 104
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-stubs.cc | 106
-rw-r--r--  deps/v8/test/cctest/compiler/test-run-variables.cc | 1
-rw-r--r--  deps/v8/test/cctest/compiler/test-simplified-lowering.cc | 107
-rw-r--r--  deps/v8/test/cctest/test-accessors.cc | 83
-rw-r--r--  deps/v8/test/cctest/test-alloc.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-api-interceptors.cc | 239
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 1068
-rw-r--r--  deps/v8/test/cctest/test-array-list.cc | 41
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm64.cc | 170
-rw-r--r--  deps/v8/test/cctest/test-assembler-ia32.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-assembler-mips64.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-assembler-x64.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-assembler-x87.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-constantpool.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-conversions.cc | 55
-rw-r--r--  deps/v8/test/cctest/test-cpu-profiler.cc | 519
-rw-r--r--  deps/v8/test/cctest/test-date.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 61
-rw-r--r--  deps/v8/test/cctest/test-decls.cc | 117
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 14
-rw-r--r--  deps/v8/test/cctest/test-disasm-mips.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-disasm-mips64.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-disasm-ppc.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-disasm-x64.cc | 21
-rw-r--r--  deps/v8/test/cctest/test-disasm-x87.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-feedback-vector.cc | 59
-rw-r--r--  deps/v8/test/cctest/test-func-name-inference.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 224
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 344
-rw-r--r--  deps/v8/test/cctest/test-javascript-arm64.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-js-arm64-variables.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-lockers.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-ia32.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-x64.cc | 35
-rw-r--r--  deps/v8/test/cctest/test-macro-assembler-x87.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-mark-compact.cc | 11
-rw-r--r--  deps/v8/test/cctest/test-mementos.cc | 17
-rw-r--r--  deps/v8/test/cctest/test-migrations.cc | 232
-rw-r--r--  deps/v8/test/cctest/test-parsing.cc | 536
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 975
-rw-r--r--  deps/v8/test/cctest/test-spaces.cc | 30
-rw-r--r--  deps/v8/test/cctest/test-strings.cc | 2
-rw-r--r--  deps/v8/test/cctest/test-thread-termination.cc | 65
-rw-r--r--  deps/v8/test/cctest/test-transitions.cc | 197
-rw-r--r--  deps/v8/test/cctest/test-typedarrays.cc | 81
-rw-r--r--  deps/v8/test/cctest/test-unboxed-doubles.cc | 470
-rw-r--r--  deps/v8/test/cctest/test-weakmaps.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-weaksets.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-weaktypedarrays.cc | 61
-rw-r--r--  deps/v8/test/js-perf-test/JSTests.json | 2
-rw-r--r--  deps/v8/test/message/class-constructor-accessor.js (renamed from deps/v8/test/mjsunit/harmony/disable-harmony-string.js) | 9
-rw-r--r--  deps/v8/test/message/class-constructor-accessor.out | 7
-rw-r--r--  deps/v8/test/message/class-constructor-generator.js | 10
-rw-r--r--  deps/v8/test/message/class-constructor-generator.out | 7
-rw-r--r--  deps/v8/test/message/export-duplicate-as.js | 9
-rw-r--r--  deps/v8/test/message/export-duplicate-as.out | 7
-rw-r--r--  deps/v8/test/message/export-duplicate-default.js | 8
-rw-r--r--  deps/v8/test/message/export-duplicate-default.out | 7
-rw-r--r--  deps/v8/test/message/export-duplicate.js | 9
-rw-r--r--  deps/v8/test/message/export-duplicate.out | 7
-rw-r--r--  deps/v8/test/message/import-as-eval.js | 7
-rw-r--r--  deps/v8/test/message/import-as-eval.out | 7
-rw-r--r--  deps/v8/test/message/import-as-redeclaration.js | 8
-rw-r--r--  deps/v8/test/message/import-as-redeclaration.out | 7
-rw-r--r--  deps/v8/test/message/import-as-reserved-word.js | 7
-rw-r--r--  deps/v8/test/message/import-as-reserved-word.out | 7
-rw-r--r--  deps/v8/test/message/import-eval.js | 7
-rw-r--r--  deps/v8/test/message/import-eval.out | 7
-rw-r--r--  deps/v8/test/message/import-redeclaration.js | 8
-rw-r--r--  deps/v8/test/message/import-redeclaration.out | 7
-rw-r--r--  deps/v8/test/message/import-reserved-word.js | 7
-rw-r--r--  deps/v8/test/message/import-reserved-word.out | 7
-rw-r--r--  deps/v8/test/message/testcfg.py | 3
-rw-r--r--  deps/v8/test/message/unterminated-arg-list.js | 7
-rw-r--r--  deps/v8/test/message/unterminated-arg-list.out | 8
-rw-r--r--  deps/v8/test/mjsunit/asm/construct-double.js | 33
-rw-r--r--  deps/v8/test/mjsunit/asm/double-hi.js | 40
-rw-r--r--  deps/v8/test/mjsunit/asm/double-lo.js | 40
-rw-r--r--  deps/v8/test/mjsunit/asm/if-cloning.js | 34
-rw-r--r--  deps/v8/test/mjsunit/asm/math-clz32.js | 31
-rw-r--r--  deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js | 2
-rw-r--r--  deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js | 40
-rw-r--r--  deps/v8/test/mjsunit/compiler/eager-deopt-simple.js | 18
-rw-r--r--  deps/v8/test/mjsunit/compiler/osr-forin-nested.js | 35
-rw-r--r--  deps/v8/test/mjsunit/compiler/osr-infinite.js | 78
-rw-r--r--  deps/v8/test/mjsunit/compiler/osr-labeled.js | 47
-rw-r--r--  deps/v8/test/mjsunit/compiler/osr-literals-adapted.js | 56
-rw-r--r--  deps/v8/test/mjsunit/compiler/osr-literals.js | 56
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-463056.js | 9
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-468727.js | 16
-rw-r--r--  deps/v8/test/mjsunit/compiler/regress-469089.js | 16
-rw-r--r--  deps/v8/test/mjsunit/compiler/truncating-store-deopt.js | 28
-rw-r--r--  deps/v8/test/mjsunit/compiler/try-deopt.js | 56
-rw-r--r--  deps/v8/test/mjsunit/constant-folding-2.js | 2
-rw-r--r--  deps/v8/test/mjsunit/debug-allscopes-on-debugger.js | 58
-rw-r--r--  deps/v8/test/mjsunit/debug-liveedit-check-stack.js | 6
-rw-r--r--  deps/v8/test/mjsunit/debug-references.js | 5
-rw-r--r--  deps/v8/test/mjsunit/debug-scopes.js | 24
-rw-r--r--  deps/v8/test/mjsunit/debug-set-variable-value.js | 15
-rw-r--r--  deps/v8/test/mjsunit/debug-sourceinfo.js | 618
-rw-r--r--  deps/v8/test/mjsunit/debug-step-turbofan.js | 2
-rw-r--r--  deps/v8/test/mjsunit/debug-stepframe-clearing.js | 97
-rw-r--r--  deps/v8/test/mjsunit/debug-stepin-foreach.js | 8
-rw-r--r--  deps/v8/test/mjsunit/es6/block-conflicts.js (renamed from deps/v8/test/mjsunit/harmony/block-conflicts.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/block-const-assign.js | 160
-rw-r--r--  deps/v8/test/mjsunit/es6/block-early-errors.js (renamed from deps/v8/test/mjsunit/harmony/block-early-errors.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/block-for.js (renamed from deps/v8/test/mjsunit/harmony/block-for.js) | 5
-rw-r--r--  deps/v8/test/mjsunit/es6/block-leave.js (renamed from deps/v8/test/mjsunit/harmony/block-leave.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/block-let-crankshaft.js (renamed from deps/v8/test/mjsunit/harmony/block-let-crankshaft.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/block-let-declaration.js (renamed from deps/v8/test/mjsunit/harmony/block-let-declaration.js) | 26
-rw-r--r--  deps/v8/test/mjsunit/es6/block-let-semantics.js (renamed from deps/v8/test/mjsunit/harmony/block-let-semantics.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/block-non-strict-errors.js (renamed from deps/v8/test/mjsunit/harmony/block-non-strict-errors.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/block-scoping.js (renamed from deps/v8/test/mjsunit/harmony/block-scoping.js) | 8
-rw-r--r--  deps/v8/test/mjsunit/es6/collections.js | 122
-rw-r--r--  deps/v8/test/mjsunit/es6/debug-blockscopes.js (renamed from deps/v8/test/mjsunit/harmony/debug-blockscopes.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/debug-evaluate-blockscopes.js (renamed from deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/debug-function-scopes.js (renamed from deps/v8/test/mjsunit/harmony/debug-function-scopes.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js | 32
-rw-r--r--  deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js | 29
-rw-r--r--  deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js | 30
-rw-r--r--  deps/v8/test/mjsunit/es6/debug-stepin-promises.js | 65
-rw-r--r--  deps/v8/test/mjsunit/es6/empty-for.js (renamed from deps/v8/test/mjsunit/harmony/empty-for.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/function-length-configurable.js | 119
-rw-r--r--  deps/v8/test/mjsunit/es6/function-name-configurable.js | 115
-rw-r--r--  deps/v8/test/mjsunit/es6/generators-debug-liveedit.js | 4
-rw-r--r--  deps/v8/test/mjsunit/es6/generators-objects.js | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/indexed-integer-exotics.js | 63
-rw-r--r--  deps/v8/test/mjsunit/es6/iteration-semantics.js | 32
-rw-r--r--  deps/v8/test/mjsunit/es6/iteration-syntax.js | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/map-minus-zero.js | 51
-rw-r--r--  deps/v8/test/mjsunit/es6/promises.js | 200
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-2243.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-2243.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-2322.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-2322.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-2506.js (renamed from deps/v8/test/mjsunit/regress/regress-2506.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-2858.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-2858.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-3426.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-3426.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-347906.js (renamed from deps/v8/test/mjsunit/regress/regress-347906.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-3683.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-3683.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-3741.js (renamed from deps/v8/test/mjsunit/harmony/regress/regress-3741.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-3938.js | 8
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-411237.js (renamed from deps/v8/test/mjsunit/regress/regress-411237.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-468661.js | 75
-rw-r--r--  deps/v8/test/mjsunit/es6/regress/regress-474783.js | 24
-rw-r--r--  deps/v8/test/mjsunit/es6/set-minus-zero.js | 51
-rw-r--r--  deps/v8/test/mjsunit/es6/string-codepointat.js (renamed from deps/v8/test/mjsunit/harmony/string-codepointat.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/string-endswith.js (renamed from deps/v8/test/mjsunit/harmony/string-endswith.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/string-fromcodepoint.js (renamed from deps/v8/test/mjsunit/harmony/string-fromcodepoint.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/string-includes.js (renamed from deps/v8/test/mjsunit/harmony/string-includes.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/string-raw.js (renamed from deps/v8/test/mjsunit/harmony/string-raw.js) | 38
-rw-r--r--  deps/v8/test/mjsunit/es6/string-repeat.js (renamed from deps/v8/test/mjsunit/harmony/string-repeat.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/string-startswith.js (renamed from deps/v8/test/mjsunit/harmony/string-startswith.js) | 2
-rw-r--r--  deps/v8/test/mjsunit/es6/symbols.js | 5
-rw-r--r--  deps/v8/test/mjsunit/es6/templates.js (renamed from deps/v8/test/mjsunit/harmony/templates.js) | 165
-rw-r--r--  deps/v8/test/mjsunit/es7/object-observe.js | 67
-rw-r--r--  deps/v8/test/mjsunit/function-length-accessor.js | 2
-rw-r--r--  deps/v8/test/mjsunit/function-prototype.js | 3
-rw-r--r--  deps/v8/test/mjsunit/harmony/block-const-assign.js | 137
-rw-r--r--  deps/v8/test/mjsunit/harmony/computed-property-names-classes.js | 131
-rw-r--r--  deps/v8/test/mjsunit/harmony/computed-property-names.js | 23
-rw-r--r--  deps/v8/test/mjsunit/harmony/module-linking.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/module-resolution.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/private.js | 5
-rw-r--r--  deps/v8/test/mjsunit/harmony/reflect-apply.js | 212
-rw-r--r--  deps/v8/test/mjsunit/harmony/reflect-construct.js | 277
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-3501.js (renamed from deps/v8/test/mjsunit/regress/regress-3501.js) | 0
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-448730.js) | 0
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js (renamed from deps/v8/test/mjsunit/regress/regress-crbug-451770.js) | 0
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js | 18
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-crbug-465671-null.js | 16
-rw-r--r--  deps/v8/test/mjsunit/harmony/regress/regress-crbug-465671.js | 16
-rw-r--r--  deps/v8/test/mjsunit/harmony/rest-params.js | 34
-rw-r--r--  deps/v8/test/mjsunit/harmony/typedarrays.js | 4
-rw-r--r--  deps/v8/test/mjsunit/json2.js | 5
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 48
-rw-r--r--  deps/v8/test/mjsunit/regexp-stack-overflow.js | 18
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1530.js | 8
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-270142.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-330046.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-3960.js | 36
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-3969.js | 36
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-3976.js | 80
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-3985.js | 45
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-4023.js | 67
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-4027.js | 60
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-430201b.js (renamed from deps/v8/test/preparser/strict-const.js) | 22
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-460937.js | 19
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-463028.js | 18
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-469605.js | 43
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-470804.js | 53
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-385002.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-401915.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-465564.js | 7
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-467047.js | 17
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-467531.js | 25
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-filter-contexts.js | 14
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-function-length-strict.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/string-compare-memcmp.js | 2
-rw-r--r--  deps/v8/test/mjsunit/stack-traces.js | 34
-rw-r--r--  deps/v8/test/mjsunit/strict-mode.js | 16
-rw-r--r--  deps/v8/test/mjsunit/string-concat.js | 14
-rw-r--r--  deps/v8/test/mjsunit/string-index.js | 16
-rw-r--r--  deps/v8/test/mjsunit/strong/arrays.js | 12
-rw-r--r--  deps/v8/test/mjsunit/strong/classes.js | 47
-rw-r--r--  deps/v8/test/mjsunit/strong/declaration-after-use.js | 258
-rw-r--r--  deps/v8/test/mjsunit/strong/functions.js | 68
-rw-r--r--  deps/v8/test/mozilla/mozilla.status | 13
-rw-r--r--  deps/v8/test/preparser/strict-function-statement.pyt | 109
-rw-r--r--  deps/v8/test/test262-es6/test262-es6.status | 324
-rw-r--r--  deps/v8/test/test262-es6/testcfg.py | 3
-rw-r--r--  deps/v8/test/test262/test262.status | 127
-rw-r--r--  deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc | 15
-rw-r--r--  deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc | 137
-rw-r--r--  deps/v8/test/unittests/compiler/change-lowering-unittest.cc | 456
-rw-r--r--  deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc | 64
-rw-r--r--  deps/v8/test/unittests/compiler/common-operator-unittest.cc | 2
-rw-r--r--  deps/v8/test/unittests/compiler/control-equivalence-unittest.cc | 2
-rw-r--r--  deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc | 80
-rw-r--r--  deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc | 44
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-selector-unittest.cc | 49
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc | 121
-rw-r--r--  deps/v8/test/unittests/compiler/instruction-sequence-unittest.h | 52
-rw-r--r--  deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc | 125
-rw-r--r--  deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc | 148
-rw-r--r--  deps/v8/test/unittests/compiler/js-operator-unittest.cc | 83
-rw-r--r--  deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc | 70
-rw-r--r--  deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc | 373
-rw-r--r--  deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc | 149
-rw-r--r--  deps/v8/test/unittests/compiler/machine-operator-unittest.cc | 44
-rw-r--r--  deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc | 15
-rw-r--r--  deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc | 36
-rw-r--r--  deps/v8/test/unittests/compiler/move-optimizer-unittest.cc | 53
-rw-r--r--  deps/v8/test/unittests/compiler/node-properties-unittest.cc | 72
-rw-r--r--  deps/v8/test/unittests/compiler/node-test-utils.cc | 39
-rw-r--r--  deps/v8/test/unittests/compiler/node-test-utils.h | 16
-rw-r--r--  deps/v8/test/unittests/compiler/node-unittest.cc | 6
-rw-r--r--  deps/v8/test/unittests/compiler/opcodes-unittest.cc | 25
-rw-r--r--  deps/v8/test/unittests/compiler/ppc/OWNERS | 3
-rw-r--r--  deps/v8/test/unittests/compiler/register-allocator-unittest.cc | 106
-rw-r--r--  deps/v8/test/unittests/compiler/schedule-unittest.cc | 33
-rw-r--r--  deps/v8/test/unittests/compiler/scheduler-unittest.cc | 303
-rw-r--r--  deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc | 33
-rw-r--r--  deps/v8/test/unittests/compiler/simplified-operator-unittest.cc | 1
-rw-r--r--  deps/v8/test/unittests/compiler/state-values-utils-unittest.cc | 149
-rw-r--r--  deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc | 34
-rw-r--r--  deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc | 52
-rw-r--r--  deps/v8/test/unittests/unittests.gyp | 2
-rw-r--r--  deps/v8/test/webkit/fast/js/kde/prototype_length-expected.txt | 2
-rw-r--r--  deps/v8/test/webkit/fast/js/kde/prototype_length.js | 2
-rw-r--r--  deps/v8/test/webkit/webkit.status | 6
-rw-r--r--  deps/v8/testing/commit_queue/OWNERS | 1
-rw-r--r--  deps/v8/testing/commit_queue/config.json | 74
-rwxr-xr-x  deps/v8/tools/check-name-clashes.py | 3
-rwxr-xr-x  deps/v8/tools/cpu.sh | 2
-rw-r--r--  deps/v8/tools/external-reference-check.py | 2
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 49
-rwxr-xr-x  deps/v8/tools/js2c.py | 2
-rwxr-xr-x  deps/v8/tools/ll_prof.py | 23
-rw-r--r--  deps/v8/tools/parser-shell.cc | 24
-rwxr-xr-x  deps/v8/tools/perf-to-html.py | 378
-rwxr-xr-x  deps/v8/tools/release/auto_push.py | 19
-rwxr-xr-x  deps/v8/tools/release/auto_roll.py | 61
-rwxr-xr-x  deps/v8/tools/release/chromium_roll.py | 58
-rw-r--r--  deps/v8/tools/release/common_includes.py | 63
-rwxr-xr-x  deps/v8/tools/release/create_release.py | 24
-rwxr-xr-x  deps/v8/tools/release/merge_to_branch.py | 4
-rwxr-xr-x  deps/v8/tools/release/releases.py | 4
-rw-r--r--  deps/v8/tools/release/test_scripts.py | 169
-rwxr-xr-x  deps/v8/tools/run-tests.py | 2
-rwxr-xr-x  deps/v8/tools/run_perf.py | 16
-rwxr-xr-x  deps/v8/tools/test-push-to-trunk.sh | 246
-rw-r--r--  deps/v8/tools/testrunner/local/statusfile.py | 5
-rwxr-xr-x  deps/v8/tools/v8-info.sh | 10
822 files changed, 43421 insertions(+), 27121 deletions(-)
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 2eac3035c3..cc424333d3 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -24,6 +24,7 @@
.cproject
.d8_history
.gclient_entries
+.landmines
.project
.pydevproject
.settings
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 39fb88c57b..5b976b8b79 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -64,6 +64,7 @@ Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Jonathan Liu <net147@gmail.com>
+JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 713ab6de57..fc0ea8eb68 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -2,6 +2,10 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+import("//build/config/android/config.gni")
+import("//build/config/arm.gni")
+import("//build/config/mips.gni")
+
# Because standalone V8 builds are not supported, assume this is part of a
# Chromium build.
import("//build/module_args/v8.gni")
@@ -18,10 +22,23 @@ v8_interpreted_regexp = false
v8_object_print = false
v8_postmortem_support = false
v8_use_snapshot = true
-v8_target_arch = cpu_arch
+v8_target_arch = target_cpu
v8_random_seed = "314159265"
v8_toolset_for_d8 = "host"
+# The snapshot needs to be compiled for the host, but compiled with
+# a toolchain that matches the bit-width of the target.
+#
+# TODO(GYP): For now we only support 32-bit little-endian target builds from an
+# x64 Linux host. Eventually we need to support all of the host/target
+# configurations v8 runs on.
+if (host_cpu == "x64" && host_os == "linux" &&
+ (target_cpu == "arm" || target_cpu == "mipsel" || target_cpu == "x86")) {
+ snapshot_toolchain = "//build/toolchain/linux:clang_x86"
+} else {
+ snapshot_toolchain = default_toolchain
+}
+
###############################################################################
# Configurations
#
@@ -96,37 +113,49 @@ config("toolchain") {
defines = []
cflags = []
- # TODO(jochen): Add support for arm subarchs, mips, mipsel.
+ # TODO(jochen): Add support for arm subarchs, mips, mipsel, mips64el.
if (v8_target_arch == "arm") {
defines += [ "V8_TARGET_ARCH_ARM" ]
- if (arm_version == 7) {
- defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
- }
- if (arm_fpu == "vfpv3-d16") {
- defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
- }
- if (arm_fpu == "vfpv3") {
- defines += [
- "CAN_USE_VFP3_INSTRUCTIONS",
- "CAN_USE_VFP32DREGS",
- ]
- }
- if (arm_fpu == "neon") {
+ if (current_cpu == "arm") {
+ if (arm_version == 7) {
+ defines += [ "CAN_USE_ARMV7_INSTRUCTIONS" ]
+ }
+ if (arm_fpu == "vfpv3-d16") {
+ defines += [ "CAN_USE_VFP3_INSTRUCTIONS" ]
+ } else if (arm_fpu == "vfpv3") {
+ defines += [
+ "CAN_USE_VFP3_INSTRUCTIONS",
+ "CAN_USE_VFP32DREGS",
+ ]
+ } else if (arm_fpu == "neon") {
+ defines += [
+ "CAN_USE_VFP3_INSTRUCTIONS",
+ "CAN_USE_VFP32DREGS",
+ "CAN_USE_NEON",
+ ]
+ }
+ } else {
# These defines are used for the ARM simulator.
defines += [
+ "CAN_USE_ARMV7_INSTRUCTIONS",
"CAN_USE_VFP3_INSTRUCTIONS",
"CAN_USE_VFP32DREGS",
- "CAN_USE_NEON",
+ "USE_EABI_HARDFLOAT=0",
]
}
# TODO(jochen): Add support for arm_test_noprobe.
-
- # TODO(jochen): Add support for cpu_arch != v8_target_arch/
}
if (v8_target_arch == "arm64") {
defines += [ "V8_TARGET_ARCH_ARM64" ]
}
+ if (v8_target_arch == "mipsel") {
+ defines += [ "V8_TARGET_ARCH_MIPS" ]
+ }
+ if (v8_target_arch == "mips64el") {
+ defines += [ "V8_TARGET_ARCH_MIPS64" ]
+ }
if (v8_target_arch == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
}
@@ -173,8 +202,8 @@ action("js2c") {
"src/array.js",
"src/string.js",
"src/uri.js",
- "src/third_party/fdlibm/fdlibm.js",
"src/math.js",
+ "src/third_party/fdlibm/fdlibm.js",
"src/date.js",
"src/regexp.js",
"src/arraybuffer.js",
@@ -192,6 +221,7 @@ action("js2c") {
"src/debug-debugger.js",
"src/mirror-debugger.js",
"src/liveedit-debugger.js",
+ "src/templates.js",
"src/macros.py",
]
@@ -230,13 +260,12 @@ action("js2c_experimental") {
"src/macros.py",
"src/proxy.js",
"src/generator.js",
- "src/harmony-string.js",
"src/harmony-array.js",
"src/harmony-array-includes.js",
"src/harmony-typedarray.js",
"src/harmony-tostring.js",
- "src/harmony-templates.js",
"src/harmony-regexp.js",
+ "src/harmony-reflect.js"
]
outputs = [
@@ -322,7 +351,7 @@ action("run_mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
- ":mksnapshot($host_toolchain)",
+ ":mksnapshot($snapshot_toolchain)",
]
script = "tools/run.py"
@@ -332,7 +361,7 @@ action("run_mksnapshot") {
]
args = [
- "./" + rebase_path(get_label_info(":mksnapshot($host_toolchain)",
+ "./" + rebase_path(get_label_info(":mksnapshot($snapshot_toolchain)",
"root_out_dir") + "/mksnapshot",
root_build_dir),
"--log-snapshot-positions",
@@ -373,7 +402,7 @@ source_set("v8_nosnapshot") {
sources = [
"$target_gen_dir/libraries.cc",
"$target_gen_dir/experimental-libraries.cc",
- "src/snapshot-empty.cc",
+ "src/snapshot/snapshot-empty.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -423,8 +452,8 @@ if (v8_use_external_startup_data) {
]
sources = [
- "src/natives-external.cc",
- "src/snapshot-external.cc",
+ "src/snapshot/natives-external.cc",
+ "src/snapshot/snapshot-external.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
@@ -535,9 +564,7 @@ source_set("v8_base") {
"src/compiler/frame.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
- "src/compiler/generic-algorithm.h",
"src/compiler/graph-builder.h",
- "src/compiler/graph-inl.h",
"src/compiler/graph-reducer.cc",
"src/compiler/graph-reducer.h",
"src/compiler/graph-replay.cc",
@@ -566,6 +593,8 @@ source_set("v8_base") {
"src/compiler/js-intrinsic-lowering.h",
"src/compiler/js-operator.cc",
"src/compiler/js-operator.h",
+ "src/compiler/js-type-feedback.cc",
+ "src/compiler/js-type-feedback.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/jump-threading.cc",
@@ -573,6 +602,8 @@ source_set("v8_base") {
"src/compiler/linkage-impl.h",
"src/compiler/linkage.cc",
"src/compiler/linkage.h",
+ "src/compiler/liveness-analyzer.cc",
+ "src/compiler/liveness-analyzer.h",
"src/compiler/load-elimination.cc",
"src/compiler/load-elimination.h",
"src/compiler/loop-peeling.cc",
@@ -591,6 +622,7 @@ source_set("v8_base") {
"src/compiler/node-cache.h",
"src/compiler/node-marker.cc",
"src/compiler/node-marker.h",
+ "src/compiler/node-matchers.cc",
"src/compiler/node-matchers.h",
"src/compiler/node-properties.cc",
"src/compiler/node-properties.h",
@@ -631,6 +663,8 @@ source_set("v8_base") {
"src/compiler/simplified-operator.h",
"src/compiler/source-position.cc",
"src/compiler/source-position.h",
+ "src/compiler/state-values-utils.cc",
+ "src/compiler/state-values-utils.h",
"src/compiler/typer.cc",
"src/compiler/typer.h",
"src/compiler/value-numbering-reducer.cc",
@@ -848,7 +882,6 @@ source_set("v8_base") {
"src/modules.cc",
"src/modules.h",
"src/msan.h",
- "src/natives.h",
"src/objects-debug.cc",
"src/objects-inl.h",
"src/objects-printer.cc",
@@ -860,6 +893,8 @@ source_set("v8_base") {
"src/ostreams.h",
"src/parser.cc",
"src/parser.h",
+ "src/pending-compilation-error-handler.cc",
+ "src/pending-compilation-error-handler.h",
"src/perf-jit.cc",
"src/perf-jit.h",
"src/preparse-data-format.h",
@@ -929,20 +964,23 @@ source_set("v8_base") {
"src/scopeinfo.h",
"src/scopes.cc",
"src/scopes.h",
- "src/serialize.cc",
- "src/serialize.h",
"src/small-pointer-list.h",
"src/smart-pointers.h",
- "src/snapshot-common.cc",
- "src/snapshot-source-sink.cc",
- "src/snapshot-source-sink.h",
- "src/snapshot.h",
+ "src/snapshot/natives.h",
+ "src/snapshot/serialize.cc",
+ "src/snapshot/serialize.h",
+ "src/snapshot/snapshot-common.cc",
+ "src/snapshot/snapshot-source-sink.cc",
+ "src/snapshot/snapshot-source-sink.h",
+ "src/snapshot/snapshot.h",
"src/string-builder.cc",
"src/string-builder.h",
"src/string-search.cc",
"src/string-search.h",
"src/string-stream.cc",
"src/string-stream.h",
+ "src/strings-storage.cc",
+ "src/strings-storage.h",
"src/strtod.cc",
"src/strtod.h",
"src/token.cc",
@@ -1356,11 +1394,11 @@ source_set("v8_libbase") {
if (is_linux) {
sources += [ "src/base/platform/platform-linux.cc" ]
- libs = [ "rt" ]
+ libs = [ "dl", "rt" ]
} else if (is_android) {
defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
- if (build_os == "mac") {
+ if (host_os == "mac") {
if (current_toolchain == host_toolchain) {
sources += [ "src/base/platform/platform-macos.cc" ]
} else {
@@ -1425,12 +1463,12 @@ source_set("v8_libplatform") {
# Executables
#
-if (current_toolchain == host_toolchain) {
+if (current_toolchain == snapshot_toolchain) {
executable("mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [
- "src/mksnapshot.cc",
+ "src/snapshot/mksnapshot.cc",
]
configs -= [ "//build/config/compiler:chromium_code" ]
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 0f835dc8c4..69ecd92ba6 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,449 @@
+2015-03-30: Version 4.3.61
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-28: Version 4.3.60
+
+ Reland^2 "Filter invalid slots out from the SlotsBuffer after marking."
+ (Chromium issues 454297, 470801).
+
+ This fixes a missing incremental write barrier issue when double field
+ unboxing is enabled (Chromium issue 469146).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-27: Version 4.3.59
+
+ Use a slot that is located on a heap page when removing invalid entries
+ from the SlotsBuffer (Chromium issue 470801).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-26: Version 4.3.58
+
+ Return timestamp of the last recorded interval to the caller of
+ HeapProfiler::GetHeapStats (Chromium issue 467222).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-26: Version 4.3.57
+
+ Reland [V8] Removed SourceLocationRestrict (Chromium issue 468781).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-25: Version 4.3.56
+
+ Remove v8::Isolate::ClearInterrupt.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-25: Version 4.3.55
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-24: Version 4.3.54
+
+ Do not assign positions to parser-generated desugarings (Chromium issue
+ 468661).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-24: Version 4.3.53
+
+ Filter invalid slots out from the SlotsBuffer after marking (Chromium
+ issue 454297).
+
+ Fix OOM bug 3976 (issue 3976).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-24: Version 4.3.52
+
+ Remove calls to IdleNotification().
+
+ Save heap object tracking data in heap snapshot (Chromium issue 467222).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-24: Version 4.3.51
+
+ [V8] Removed SourceLocationRestrict (Chromium issue 468781).
+
+ [turbofan] Fix control reducer bug with walking non-control edges during
+ ConnectNTL phase (Chromium issue 469605).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-23: Version 4.3.50
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-23: Version 4.3.49
+
+ Ensure we don't overflow in BCE (Chromium issue 469148).
+
+ [turbofan] Fix lowering of Math.max for integral inputs (Chromium issue
+ 468162).
+
+ Use libdl to get symbols for backtraces.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-19: Version 4.3.48
+
+ Clarify what APIs return Maybe and MaybeLocal values (issue 3929).
+
+ Introduce explicit constant for per Context debug data set by embedder
+ (Chromium issue 466631).
+
+ Adjust key behaviour for weak collections (issues 3970, 3971, Chromium
+ issue 460083).
+
+ Turn on overapproximation of the weak closure (issue 3862).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-18: Version 4.3.47
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-17: Version 4.3.46
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-17: Version 4.3.45
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-17: Version 4.3.44
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-16: Version 4.3.43
+
+ Bugfix in hydrogen GVN (Chromium issue 467481).
+
+ Remove obsolete TakeHeapSnapshot method from API (Chromium issue
+ 465651).
+
+ Beautify syntax error for unterminated argument list (Chromium issue
+ 339474).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-16: Version 4.3.42
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-15: Version 4.3.41
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-14: Version 4.3.40
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-14: Version 4.3.39
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-14: Version 4.3.38
+
+ Remove --harmony-scoping flag.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-13: Version 4.3.37
+
+ Implement TDZ in StoreIC for top-level lexicals (issue 3941).
+
+ Turn on job-based optimizing compiler (issue 3608).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-13: Version 4.3.36
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-12: Version 4.3.35
+
+ Add Cast() for Int32 and Uint32 (Chromium issue 462402).
+
+ Incorrect handling of HTransitionElementsKind in hydrogen check
+ elimination phase fixed (Chromium issue 460917).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-12: Version 4.3.34
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-12: Version 4.3.33
+
+ Fix the toolchain used to build the snapshots in GN (Chromium issues
+ 395249, 465456).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-11: Version 4.3.32
+
+ Reland of Remove slots that point to unboxed doubles from the
+ StoreBuffer/SlotsBuffer (Chromium issues 454297, 465273).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-11: Version 4.3.31
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-11: Version 4.3.30
+
+ Remove uid and title from HeapSnapshot (Chromium issue 465651).
+
+ Remove deprecated CpuProfiler methods.
+
+ [turbofan] Fix --turbo-osr for OSRing into inner loop inside for-in
+ (Chromium issue 462775).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-10: Version 4.3.29
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-10: Version 4.3.28
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-10: Version 4.3.27
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-07: Version 4.3.26
+
+ Remove slots that point to unboxed doubles from the
+ StoreBuffer/SlotsBuffer (Chromium issue 454297).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-06: Version 4.3.25
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-06: Version 4.3.24
+
+ convert more things to maybe (issue 3929).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-05: Version 4.3.23
+
+ [V8] Use Function.name for stack frames in v8::StackTrace (Chromium
+ issue 17356).
+
+ Allow passing sourceMapUrl when compiling scripts (Chromium issue
+ 462572).
+
+ convert compile functions to use maybe (issue 3929).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-05: Version 4.3.22
+
+ give UniquePersistent full move semantics (issue 3669).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-05: Version 4.3.21
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-04: Version 4.3.20
+
+ convert remaining object functions to maybes (issue 3929).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-04: Version 4.3.19
+
+ ARM assembler: fix undefined behaviour in fits_shifter (Chromium issues
+ 444089, 463436).
+
+ Implement subclassing Arrays (issue 3930).
+
+ [es6] Fix for-const loops (issue 3983).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-04: Version 4.3.18
+
+ Implement subclassing Arrays (issue 3930).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-04: Version 4.3.17
+
+ Implement subclassing Arrays (issue 3930).
+
+ convert more object functions to return maybes (issue 3929).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-03: Version 4.3.16
+
+ check for null context on execution entry (issue 3929).
+
+ convert object::* to return maybe values (issue 3929).
+
+ Removed funky Maybe constructor and made fields private (issue 3929).
+
+ Polish Maybe API a bit, removing useless creativity and fixing some
+ signatures (issue 3929).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-02: Version 4.3.15
+
+ Performance and stability improvements on all platforms.
+
+
+2015-03-02: Version 4.3.14
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-28: Version 4.3.13
+
+ Disallow subclassing Arrays (issue 3930).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-28: Version 4.3.12
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-27: Version 4.3.11
+
+ Disallow subclassing Arrays (issue 3930).
+
+ convert Value::*Value() function to return Maybe results (issue 3929).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-27: Version 4.3.10
+
+ Convert v8::Value::To* to use MaybeLocal (issue 3929).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-26: Version 4.3.9
+
+ Add public version macros (issue 3075).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-26: Version 4.3.8
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-25: Version 4.3.7
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-25: Version 4.3.6
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-25: Version 4.3.5
+
+ Turn on job based recompilation (issue 3608).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-24: Version 4.3.4
+
+ Reland "Correctly propagate terminate exception in TryCall." (issue
+ 3892).
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-24: Version 4.3.3
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-24: Version 4.3.2
+
+ Update GN build files with the cpu_arch -> current_cpu change.
+
+ Performance and stability improvements on all platforms.
+
+
+2015-02-23: Version 4.3.1
+
+ Limit size of first page based on serialized data (Chromium issue
+ 453111).
+
+ Performance and stability improvements on all platforms.
+
+
2015-02-19: Version 4.2.77
Make generator constructors configurable (issue 3902).
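(Editorial aside, not part of the ChangeLog: many of the 4.3.x entries above track one migration, issue 3929, in which APIs that can fail return Maybe<T> or MaybeLocal<T> instead of raw values or empty handles. A rough sketch of the new calling convention, assuming an embedder already holds an isolate, a context, and an object; none of the names below come from this patch:)

    // Sketch only: isolate, context and obj are assumed embedder state.
    void ReadAnswer(v8::Isolate* isolate, v8::Local<v8::Context> context,
                    v8::Local<v8::Object> obj) {
      v8::Local<v8::String> key = v8::String::NewFromUtf8(isolate, "answer");
      v8::Local<v8::Value> value;
      if (!obj->Get(context, key).ToLocal(&value)) {
        return;  // Empty MaybeLocal: an exception is pending on the isolate.
      }
      // Scalar conversions now return Maybe<T>; FromMaybe() picks a default.
      int32_t n = value->Int32Value(context).FromMaybe(0);
      static_cast<void>(n);
    }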
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index b829d05dab..42606acfcf 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,17 +8,17 @@ vars = {
deps = {
"v8/build/gyp":
- Var("git_url") + "/external/gyp.git" + "@" + "34640080d08ab2a37665512e52142947def3056d",
+ Var("git_url") + "/external/gyp.git" + "@" + "d174d75bf69c682cb62af9187879e01513b35e52",
"v8/third_party/icu":
- Var("git_url") + "/chromium/deps/icu.git" + "@" + "4e3266f32c62d30a3f9e2232a753c60129d1e670",
+ Var("git_url") + "/chromium/deps/icu.git" + "@" + "7c81740601355556e630da515b74d889ba2f8d08",
"v8/buildtools":
- Var("git_url") + "/chromium/buildtools.git" + "@" + "5c5e924788fe40f7d6e0a3841ac572de2475e689",
+ Var("git_url") + "/chromium/buildtools.git" + "@" + "3b302fef93f7cc58d9b8168466905237484b2772",
"v8/testing/gtest":
Var("git_url") + "/external/googletest.git" + "@" + "be1868139ffe0ccd0e8e3b37292b84c821d9c8ad",
"v8/testing/gmock":
Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271", # from svn revision 501
"v8/tools/clang":
- Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "f6daa55d03995e82201a3278203e7c0421a59546",
+ Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "ea2f0a2d96ffc6f5a51c034db704ccc1a6543156",
}
deps_os = {
@@ -46,6 +46,17 @@ skip_child_includes = [
]
hooks = [
+ {
+ # This clobbers when necessary (based on get_landmines.py). It must be the
+ # first hook so that other things that get/generate into the output
+ # directory will not subsequently be clobbered.
+ 'name': 'landmines',
+ 'pattern': '.',
+ 'action': [
+ 'python',
+ 'v8/build/landmines.py',
+ ],
+ },
# Pull clang-format binaries using checked-in hashes.
{
"name": "clang_format_win",
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 5468d91334..055a57d286 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -234,7 +234,8 @@ ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87 ppc ppc64
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
-ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
+ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
+ android_mipsel android_x87
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
diff --git a/deps/v8/Makefile.android b/deps/v8/Makefile.android
index 2a3640382b..f89fd21fda 100644
--- a/deps/v8/Makefile.android
+++ b/deps/v8/Makefile.android
@@ -26,7 +26,8 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
-ANDROID_ARCHES = android_ia32 android_arm android_arm64 android_mipsel android_x87
+ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
+ android_mipsel android_x87
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
@@ -66,6 +67,11 @@ else ifeq ($(ARCH), android_ia32)
TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.8
+else ifeq ($(ARCH), android_x64)
+ DEFINES = target_arch=x64 v8_target_arch=x64 android_target_arch=x86_64 android_target_platform=21
+ TOOLCHAIN_ARCH = x86_64
+ TOOLCHAIN_PREFIX = x86_64-linux-android
+ TOOLCHAIN_VER = 4.9
else ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index fd0601f17b..5b3d58d3ba 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -244,6 +244,7 @@ def GetPreferredTryMasters(project, change):
return {
'tryserver.v8': {
'v8_linux_rel': set(['defaulttests']),
+ 'v8_linux_dbg': set(['defaulttests']),
'v8_linux_nodcheck_rel': set(['defaulttests']),
'v8_linux_gcc_compile_rel': set(['defaulttests']),
'v8_linux64_rel': set(['defaulttests']),
diff --git a/deps/v8/README.md b/deps/v8/README.md
index bc1685affa..5cd4b5811a 100644
--- a/deps/v8/README.md
+++ b/deps/v8/README.md
@@ -18,13 +18,13 @@ Getting the Code
Checkout [depot tools](http://www.chromium.org/developers/how-tos/install-depot-tools), and run
-> `fetch v8`
+ fetch v8
This will checkout V8 into the directory `v8` and fetch all of its dependencies.
To stay up to date, run
-> `git pull origin`
-> `gclient sync`
+ git pull origin
+ gclient sync
For fetching all branches, add the following into your remote
configuration in `.git/config`:
diff --git a/deps/v8/build/android.gypi b/deps/v8/build/android.gypi
index 5d3b25a746..533250e7f5 100644
--- a/deps/v8/build/android.gypi
+++ b/deps/v8/build/android.gypi
@@ -43,7 +43,13 @@
'android_stlport': '<(android_toolchain)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
- 'android_lib': '<(android_sysroot)/usr/lib',
+ 'conditions': [
+ ['target_arch=="x64"', {
+ 'android_lib': '<(android_sysroot)/usr/lib64',
+ }, {
+ 'android_lib': '<(android_sysroot)/usr/lib',
+ }],
+ ],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}, {
@@ -52,7 +58,13 @@
'android_stlport': '<(android_ndk_root)/sources/cxx-stl/stlport/',
},
'android_include': '<(android_sysroot)/usr/include',
- 'android_lib': '<(android_sysroot)/usr/lib',
+ 'conditions': [
+ ['target_arch=="x64"', {
+ 'android_lib': '<(android_sysroot)/usr/lib64',
+ }, {
+ 'android_lib': '<(android_sysroot)/usr/lib',
+ }],
+ ],
'android_stlport_include': '<(android_stlport)/stlport',
'android_stlport_libs': '<(android_stlport)/libs',
}],
@@ -227,7 +239,7 @@
'target_conditions': [
['_type=="executable"', {
'conditions': [
- ['target_arch=="arm64"', {
+ ['target_arch=="arm64" or target_arch=="x64"', {
'ldflags': [
'-Wl,-dynamic-linker,/system/bin/linker64',
],
diff --git a/deps/v8/build/detect_v8_host_arch.py b/deps/v8/build/detect_v8_host_arch.py
index 3460a9a404..89e8286e1f 100644
--- a/deps/v8/build/detect_v8_host_arch.py
+++ b/deps/v8/build/detect_v8_host_arch.py
@@ -41,6 +41,7 @@ def DoMain(_):
"""Hook to be called from gyp without starting a separate python
interpreter."""
host_arch = platform.machine()
+  host_system = platform.system()
# Convert machine type to format recognized by gyp.
if re.match(r'i.86', host_arch) or host_arch == 'i86pc':
@@ -56,6 +57,13 @@ def DoMain(_):
elif host_arch.startswith('mips'):
host_arch = 'mipsel'
+  # Under AIX, the value returned by platform.machine is not the best
+  # indicator of the host architecture. AIX 6.1, the lowest supported
+  # level, only provides a 64-bit kernel.
+ if host_system == 'AIX':
+ host_arch = 'ppc64'
+
# platform.machine is based on running kernel. It's possible to use 64-bit
# kernel with 32-bit userland, e.g. to give linker slightly more memory.
# Distinguish between different userland bitness by querying
diff --git a/deps/v8/build/features.gypi b/deps/v8/build/features.gypi
index 2eadca3384..5c60273a61 100644
--- a/deps/v8/build/features.gypi
+++ b/deps/v8/build/features.gypi
@@ -102,7 +102,7 @@
'DebugBaseCommon': {
'abstract': 1,
'variables': {
- 'v8_enable_handle_zapping%': 1,
+ 'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_handle_zapping==1', {
@@ -112,7 +112,7 @@
}, # Debug
'Release': {
'variables': {
- 'v8_enable_handle_zapping%': 0,
+ 'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {
diff --git a/deps/v8/build/get_landmines.py b/deps/v8/build/get_landmines.py
index 66a86cbb50..f61c04de44 100755
--- a/deps/v8/build/get_landmines.py
+++ b/deps/v8/build/get_landmines.py
@@ -20,6 +20,7 @@ def main():
print 'Activating MSVS 2013.'
print 'Revert activation of MSVS 2013.'
print 'Activating MSVS 2013 again.'
+ print 'Clobber after ICU roll.'
return 0
diff --git a/deps/v8/build/gyp_environment.py b/deps/v8/build/gyp_environment.py
new file mode 100644
index 0000000000..f1cee6ef8e
--- /dev/null
+++ b/deps/v8/build/gyp_environment.py
@@ -0,0 +1,52 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Sets up various automatic gyp environment variables. These are used by
+gyp_v8 and landmines.py which run at different stages of runhooks. To
+make sure settings are consistent between them, all setup should happen here.
+"""
+
+import os
+import sys
+
+SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
+V8_ROOT = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
+
+
+def apply_gyp_environment(file_path=None):
+ """
+ Reads in a *.gyp_env file and applies the valid keys to os.environ.
+ """
+ if not file_path or not os.path.exists(file_path):
+ return
+ file_contents = open(file_path).read()
+ try:
+ file_data = eval(file_contents, {'__builtins__': None}, None)
+ except SyntaxError, e:
+ e.filename = os.path.abspath(file_path)
+ raise
+ supported_vars = ( 'V8_GYP_FILE',
+ 'V8_GYP_SYNTAX_CHECK',
+ 'GYP_DEFINES',
+ 'GYP_GENERATOR_FLAGS',
+ 'GYP_GENERATOR_OUTPUT', )
+ for var in supported_vars:
+ val = file_data.get(var)
+ if val:
+ if var in os.environ:
+ print 'INFO: Environment value for "%s" overrides value in %s.' % (
+ var, os.path.abspath(file_path)
+ )
+ else:
+ os.environ[var] = val
+
+
+def set_environment():
+ """Sets defaults for GYP_* variables."""
+
+ if 'SKIP_V8_GYP_ENV' not in os.environ:
+ # Update the environment based on v8.gyp_env
+ gyp_env_path = os.path.join(os.path.dirname(V8_ROOT), 'v8.gyp_env')
+ apply_gyp_environment(gyp_env_path)
diff --git a/deps/v8/build/gyp_v8 b/deps/v8/build/gyp_v8
index 14467eccaa..1e8a5c806e 100755
--- a/deps/v8/build/gyp_v8
+++ b/deps/v8/build/gyp_v8
@@ -31,6 +31,7 @@
# is invoked by V8 beyond what can be done in the gclient hooks.
import glob
+import gyp_environment
import os
import platform
import shlex
@@ -48,34 +49,6 @@ sys.path.insert(
1, os.path.abspath(os.path.join(v8_root, 'tools', 'generate_shim_headers')))
-def apply_gyp_environment(file_path=None):
- """
- Reads in a *.gyp_env file and applies the valid keys to os.environ.
- """
- if not file_path or not os.path.exists(file_path):
- return
- file_contents = open(file_path).read()
- try:
- file_data = eval(file_contents, {'__builtins__': None}, None)
- except SyntaxError, e:
- e.filename = os.path.abspath(file_path)
- raise
- supported_vars = ( 'V8_GYP_FILE',
- 'V8_GYP_SYNTAX_CHECK',
- 'GYP_DEFINES',
- 'GYP_GENERATOR_FLAGS',
- 'GYP_GENERATOR_OUTPUT', )
- for var in supported_vars:
- val = file_data.get(var)
- if val:
- if var in os.environ:
- print 'INFO: Environment value for "%s" overrides value in %s.' % (
- var, os.path.abspath(file_path)
- )
- else:
- os.environ[var] = val
-
-
def additional_include_files(args=[]):
"""
Returns a list of additional (.gypi) files to include, without
@@ -109,13 +82,6 @@ def additional_include_files(args=[]):
def run_gyp(args):
rc = gyp.main(args)
- # Check for landmines (reasons to clobber the build). This must be run here,
- # rather than a separate runhooks step so that any environment modifications
- # from above are picked up.
- print 'Running build/landmines.py...'
- subprocess.check_call(
- [sys.executable, os.path.join(script_dir, 'landmines.py')])
-
if rc != 0:
print 'Error running GYP'
sys.exit(rc)
@@ -124,10 +90,7 @@ def run_gyp(args):
if __name__ == '__main__':
args = sys.argv[1:]
- if 'SKIP_V8_GYP_ENV' not in os.environ:
- # Update the environment based on v8.gyp_env
- gyp_env_path = os.path.join(os.path.dirname(v8_root), 'v8.gyp_env')
- apply_gyp_environment(gyp_env_path)
+ gyp_environment.set_environment()
# This could give false positives since it doesn't actually do real option
# parsing. Oh well.
diff --git a/deps/v8/build/landmine_utils.py b/deps/v8/build/landmine_utils.py
index e8b7c98d5f..cb3499132a 100644
--- a/deps/v8/build/landmine_utils.py
+++ b/deps/v8/build/landmine_utils.py
@@ -47,10 +47,19 @@ def gyp_defines():
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
+
+@memoize()
+def gyp_generator_flags():
+ """Parses and returns GYP_GENERATOR_FLAGS env var as a dictionary."""
+ return dict(arg.split('=', 1)
+ for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', '')))
+
+
@memoize()
def gyp_msvs_version():
return os.environ.get('GYP_MSVS_VERSION', '')
+
@memoize()
def distributor():
"""
diff --git a/deps/v8/build/landmines.py b/deps/v8/build/landmines.py
index bd1fb28f71..97c63901c1 100755
--- a/deps/v8/build/landmines.py
+++ b/deps/v8/build/landmines.py
@@ -4,10 +4,9 @@
# found in the LICENSE file.
"""
-This script runs every build as a hook. If it detects that the build should
-be clobbered, it will touch the file <build_dir>/.landmine_triggered. The
-various build scripts will then check for the presence of this file and clobber
-accordingly. The script will also emit the reasons for the clobber to stdout.
+This script runs every build as the first hook (See DEPS). If it detects that
+the build should be clobbered, it will delete the contents of the build
+directory.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
@@ -15,9 +14,13 @@ build is clobbered.
"""
import difflib
+import errno
+import gyp_environment
import logging
import optparse
import os
+import re
+import shutil
import sys
import subprocess
import time
@@ -28,46 +31,109 @@ import landmine_utils
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-def get_target_build_dir(build_tool, target):
+def get_build_dir(build_tool, is_iphone=False):
"""
Returns output directory absolute path dependent on build and targets.
Examples:
- r'c:\b\build\slave\win\build\src\out\Release'
- '/mnt/data/b/build/slave/linux/build/src/out/Debug'
- '/b/build/slave/ios_rel_device/build/src/xcodebuild/Release-iphoneos'
+ r'c:\b\build\slave\win\build\src\out'
+ '/mnt/data/b/build/slave/linux/build/src/out'
+ '/b/build/slave/ios_rel_device/build/src/xcodebuild'
Keep this function in sync with tools/build/scripts/slave/compile.py
"""
ret = None
if build_tool == 'xcode':
- ret = os.path.join(SRC_DIR, 'xcodebuild', target)
+ ret = os.path.join(SRC_DIR, 'xcodebuild')
elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
- ret = os.path.join(SRC_DIR, 'out', target)
+ if 'CHROMIUM_OUT_DIR' in os.environ:
+ output_dir = os.environ.get('CHROMIUM_OUT_DIR').strip()
+ if not output_dir:
+        raise Exception('CHROMIUM_OUT_DIR environment variable is set but blank!')
+ else:
+ output_dir = landmine_utils.gyp_generator_flags().get('output_dir', 'out')
+ ret = os.path.join(SRC_DIR, output_dir)
elif build_tool in ['msvs', 'vs', 'ib']:
- ret = os.path.join(SRC_DIR, 'build', target)
+ ret = os.path.join(SRC_DIR, 'build')
else:
raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
return os.path.abspath(ret)
-def set_up_landmines(target, new_landmines):
- """Does the work of setting, planting, and triggering landmines."""
- out_dir = get_target_build_dir(landmine_utils.builder(), target)
-
- landmines_path = os.path.join(out_dir, '.landmines')
- if not os.path.exists(out_dir):
+def extract_gn_build_commands(build_ninja_file):
+ """Extracts from a build.ninja the commands to run GN.
+
+ The commands to run GN are the gn rule and build.ninja build step at the
+ top of the build.ninja file. We want to keep these when deleting GN builds
+ since we want to preserve the command-line flags to GN.
+
+ On error, returns the empty string."""
+ result = ""
+ with open(build_ninja_file, 'r') as f:
+ # Read until the second blank line. The first thing GN writes to the file
+ # is the "rule gn" and the second is the section for "build build.ninja",
+ # separated by blank lines.
+ num_blank_lines = 0
+ while num_blank_lines < 2:
+ line = f.readline()
+ if len(line) == 0:
+ return '' # Unexpected EOF.
+ result += line
+ if line[0] == '\n':
+ num_blank_lines = num_blank_lines + 1
+ return result
+
+def delete_build_dir(build_dir):
+ # GN writes a build.ninja.d file. Note that not all GN builds have args.gn.
+ build_ninja_d_file = os.path.join(build_dir, 'build.ninja.d')
+ if not os.path.exists(build_ninja_d_file):
+ shutil.rmtree(build_dir)
return
- if not os.path.exists(landmines_path):
- print "Landmines tracker didn't exists."
-
- # FIXME(machenbach): Clobber deletes the .landmines tracker. Difficult
- # to know if we are right after a clobber or if it is first-time landmines
- # deployment. Also, a landmine-triggered clobber right after a clobber is
- # not possible. Different clobber methods for msvs, xcode and make all
- # have different blacklists of files that are not deleted.
+ # GN builds aren't automatically regenerated when you sync. To avoid
+ # messing with the GN workflow, erase everything but the args file, and
+ # write a dummy build.ninja file that will automatically rerun GN the next
+ # time Ninja is run.
+ build_ninja_file = os.path.join(build_dir, 'build.ninja')
+ build_commands = extract_gn_build_commands(build_ninja_file)
+
+ try:
+ gn_args_file = os.path.join(build_dir, 'args.gn')
+ with open(gn_args_file, 'r') as f:
+ args_contents = f.read()
+ except IOError:
+ args_contents = ''
+
+ shutil.rmtree(build_dir)
+
+ # Put back the args file (if any).
+ os.mkdir(build_dir)
+ if args_contents != '':
+ with open(gn_args_file, 'w') as f:
+ f.write(args_contents)
+
+ # Write the build.ninja file sufficiently to regenerate itself.
+ with open(os.path.join(build_dir, 'build.ninja'), 'w') as f:
+ if build_commands != '':
+ f.write(build_commands)
+ else:
+ # Couldn't parse the build.ninja file, write a default thing.
+ f.write('''rule gn
+command = gn -q gen //out/%s/
+description = Regenerating ninja files
+
+build build.ninja: gn
+generator = 1
+depfile = build.ninja.d
+''' % (os.path.split(build_dir)[1]))
+
+  # Write a .d file for the build which references a nonexistent file. This
+  # will make Ninja always mark the build as dirty.
+  with open(build_ninja_d_file, 'w') as f:
+    f.write('build.ninja: nonexistent_file.gn\n')
+
+
+def needs_clobber(landmines_path, new_landmines):
if os.path.exists(landmines_path):
- triggered = os.path.join(out_dir, '.landmines_triggered')
with open(landmines_path, 'r') as f:
old_landmines = f.readlines()
if old_landmines != new_landmines:
@@ -75,14 +141,54 @@ def set_up_landmines(target, new_landmines):
diff = difflib.unified_diff(old_landmines, new_landmines,
fromfile='old_landmines', tofile='new_landmines',
fromfiledate=old_date, tofiledate=time.ctime(), n=0)
+ sys.stdout.write('Clobbering due to:\n')
+ sys.stdout.writelines(diff)
+ return True
+ else:
+ sys.stdout.write('Clobbering due to missing landmines file.\n')
+ return True
+ return False
- with open(triggered, 'w') as f:
- f.writelines(diff)
- print "Setting landmine: %s" % triggered
- elif os.path.exists(triggered):
- # Remove false triggered landmines.
- os.remove(triggered)
- print "Removing landmine: %s" % triggered
+
+def clobber_if_necessary(new_landmines):
+ """Does the work of setting, planting, and triggering landmines."""
+ out_dir = get_build_dir(landmine_utils.builder())
+ landmines_path = os.path.normpath(os.path.join(out_dir, '..', '.landmines'))
+ try:
+ os.makedirs(out_dir)
+  except OSError as e:
+    if e.errno != errno.EEXIST:
+      raise
+
+ if needs_clobber(landmines_path, new_landmines):
+ # Clobber contents of build directory but not directory itself: some
+ # checkouts have the build directory mounted.
+ for f in os.listdir(out_dir):
+ path = os.path.join(out_dir, f)
+ if os.path.basename(out_dir) == 'build':
+ # Only delete build directories and files for MSVS builds as the folder
+ # shares some checked out files and directories.
+ if (os.path.isdir(path) and
+ re.search(r'(?:[Rr]elease)|(?:[Dd]ebug)', f)):
+ delete_build_dir(path)
+ elif (os.path.isfile(path) and
+ (path.endswith('.sln') or
+ path.endswith('.vcxproj') or
+ path.endswith('.vcxproj.user'))):
+ os.unlink(path)
+ else:
+ if os.path.isfile(path):
+ os.unlink(path)
+ elif os.path.isdir(path):
+ delete_build_dir(path)
+ if os.path.basename(out_dir) == 'xcodebuild':
+ # Xcodebuild puts an additional project file structure into build,
+ # while the output folder is xcodebuild.
+ project_dir = os.path.join(SRC_DIR, 'build', 'all.xcodeproj')
+ if os.path.exists(project_dir) and os.path.isdir(project_dir):
+ delete_build_dir(project_dir)
+
+ # Save current set of landmines for next time.
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
@@ -123,14 +229,14 @@ def main():
if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
return 0
+ gyp_environment.set_environment()
+
landmines = []
for s in landmine_scripts:
proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
output, _ = proc.communicate()
landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
-
- for target in ('Debug', 'Release'):
- set_up_landmines(target, landmines)
+ clobber_if_necessary(landmines)
return 0
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index 56cebbe1f3..d95cb7a0a2 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -146,11 +146,17 @@
}, {
'v8_enable_gdbjit%': 0,
}],
- ['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64")', {
+ ['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
+ (v8_target_arch!="x87")', {
'clang%': 1,
}, {
'clang%': 0,
}],
+ ['host_arch!="ppc" and host_arch!="ppc64" and host_arch!="ppc64le"', {
+ 'host_clang%': '1',
+ }, {
+ 'host_clang%': '0',
+ }],
],
# Default ARM variable settings.
'arm_version%': 'default',
@@ -175,16 +181,11 @@
'default_configuration': 'Debug',
'configurations': {
'DebugBaseCommon': {
- 'cflags': [ '-g', '-O0' ],
'conditions': [
- ['(v8_target_arch=="ia32" or v8_target_arch=="x87") and \
- OS=="linux"', {
- 'defines': [
- '_GLIBCXX_DEBUG'
- ],
- }],
- [ 'OS=="aix"', {
- 'cflags': [ '-gxcoff' ],
+ ['OS=="aix"', {
+ 'cflags': [ '-g', '-Og', '-gxcoff' ],
+ }, {
+ 'cflags': [ '-g', '-O0' ],
}],
],
},
@@ -198,6 +199,19 @@
# Xcode insists on this empty entry.
},
},
+ 'conditions':[
+ ['(clang==1 or host_clang==1) and OS!="win"', {
+ # This is here so that all files get recompiled after a clang roll and
+ # when turning clang on or off.
+ # (defines are passed via the command line, and build systems rebuild
+ # things when their commandline changes). Nothing should ever read this
+ # define.
+ 'defines': ['CR_CLANG_REVISION=<!(<(DEPTH)/tools/clang/scripts/update.sh --print-revision)'],
+ 'cflags+': [
+ '-Wno-format-pedantic',
+ ],
+ }],
+ ],
'target_conditions': [
['v8_code == 0', {
'defines!': [
@@ -205,8 +219,33 @@
],
'conditions': [
['os_posix == 1 and OS != "mac"', {
+ # We don't want to get warnings from third-party code,
+ # so remove any existing warning-enabling flags like -Wall.
'cflags!': [
+ '-pedantic',
+ '-Wall',
'-Werror',
+ '-Wextra',
+ ],
+ 'cflags+': [
+ # Clang considers the `register` keyword as deprecated, but
+ # ICU uses it all over the place.
+ '-Wno-deprecated-register',
+ # ICU uses its own deprecated functions.
+ '-Wno-deprecated-declarations',
+ # ICU prefers `a && b || c` over `(a && b) || c`.
+ '-Wno-logical-op-parentheses',
+ # ICU has some `unsigned < 0` checks.
+ '-Wno-tautological-compare',
+ # uresdata.c has switch(RES_GET_TYPE(x)) code. The
+ # RES_GET_TYPE macro returns an UResType enum, but some switch
+ # statement contains case values that aren't part of that
+ # enum (e.g. URES_TABLE32 which is in UResInternalType). This
+ # is on purpose.
+ '-Wno-switch',
+ ],
+ 'cflags_cc!': [
+ '-Wnon-virtual-dtor',
],
}],
['OS == "mac"', {
@@ -292,7 +331,6 @@
'cflags': [
'-Wall',
'<(werror)',
- '-W',
'-Wno-unused-parameter',
'-Wno-long-long',
'-pthread',
@@ -304,7 +342,7 @@
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'ldflags': [ '-pthread', ],
'conditions': [
- [ 'host_arch=="ppc64"', {
+ [ 'host_arch=="ppc64" and OS!="aix"', {
'cflags': [ '-mminimal-toc' ],
}],
[ 'visibility=="hidden" and v8_enable_backtrace==0', {
@@ -323,7 +361,6 @@
'cflags': [
'-Wall',
'<(werror)',
- '-W',
'-Wno-unused-parameter',
'-fno-exceptions',
# Don't warn about the "struct foo f = {0};" initialization pattern.
@@ -466,7 +503,6 @@
'WARNING_CFLAGS': [
'-Wall',
'-Wendif-labels',
- '-W',
'-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
@@ -492,6 +528,31 @@
], # target_conditions
}, # target_defaults
}], # OS=="mac"
+ ['clang!=1 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
+ 'make_global_settings': [
+ ['CC.host', '../<(clang_dir)/bin/clang'],
+ ['CXX.host', '../<(clang_dir)/bin/clang++'],
+ ],
+ }],
+ ['clang==0 and host_clang==1 and target_arch!="ia32" and target_arch!="x64"', {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'cflags_cc': [ '-std=gnu++11', ],
+ }],
+ ],
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="host"', { 'cflags!': [ '-Wno-unused-local-typedefs' ]}],
+ ],
+ },
+ }],
+ ['clang==1 and "<(GENERATOR)"=="ninja"', {
+ # See http://crbug.com/110262
+ 'target_defaults': {
+ 'cflags': [ '-fcolor-diagnostics' ],
+ 'xcode_settings': { 'OTHER_CFLAGS': [ '-fcolor-diagnostics' ] },
+ },
+ }],
['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
'and OS!="win" and "<(GENERATOR)"=="make"', {
'make_global_settings': [
diff --git a/deps/v8/build/toolchain.gypi b/deps/v8/build/toolchain.gypi
index d4a9403cbd..f1f46c89c7 100644
--- a/deps/v8/build/toolchain.gypi
+++ b/deps/v8/build/toolchain.gypi
@@ -61,6 +61,9 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
+ # Force disable libstdc++ debug mode.
+ 'disable_glibcxx_debug%': 0,
+
'v8_enable_backtrace%': 0,
# Enable profiling support. Only required on Windows.
@@ -1134,8 +1137,18 @@
# Support for backtrace_symbols.
'ldflags': [ '-rdynamic' ],
}],
+ ['OS=="linux" and disable_glibcxx_debug==0', {
+ # Enable libstdc++ debugging facilities to help catch problems
+ # early, see http://crbug.com/65151 .
+ 'defines': ['_GLIBCXX_DEBUG=1',],
+ }],
['OS=="aix"', {
'ldflags': [ '-Wl,-bbigtoc' ],
+ 'conditions': [
+ ['v8_target_arch=="ppc64"', {
+ 'cflags': [ '-maix64 -mcmodel=large' ],
+ }],
+ ],
}],
['OS=="android"', {
'variables': {
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index 6abf4e095b..ae11a2aeea 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -202,13 +202,22 @@ class V8_EXPORT Debug {
* }
* \endcode
*/
- static Local<Value> Call(v8::Handle<v8::Function> fun,
- Handle<Value> data = Handle<Value>());
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<Value> Call(v8::Handle<v8::Function> fun,
+ Handle<Value> data = Handle<Value>()));
+ // TODO(dcarney): data arg should be a MaybeLocal
+ static MaybeLocal<Value> Call(Local<Context> context,
+ v8::Handle<v8::Function> fun,
+ Handle<Value> data = Handle<Value>());
/**
* Returns a mirror object for the given object.
*/
- static Local<Value> GetMirror(v8::Handle<v8::Value> obj);
+ static V8_DEPRECATE_SOON("Use maybe version",
+ Local<Value> GetMirror(v8::Handle<v8::Value> obj));
+ static MaybeLocal<Value> GetMirror(Local<Context> context,
+ v8::Handle<v8::Value> obj);
/**
* Makes V8 process all pending debug messages.
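(A hedged sketch of the maybe overload added above; fun and context are assumed embedder state, and this snippet is not part of the patch:)

    v8::Local<v8::Value> result;
    if (v8::Debug::Call(context, fun).ToLocal(&result)) {
      // Use result; an empty MaybeLocal means an exception is pending.
    }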
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index d0215205f5..f9439c2e62 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -168,21 +168,12 @@ class V8_EXPORT CpuProfiler {
*/
void StartProfiling(Handle<String> title, bool record_samples = false);
- /** Deprecated. Use StartProfiling instead. */
- V8_DEPRECATED("Use StartProfiling",
- void StartCpuProfiling(Handle<String> title,
- bool record_samples = false));
-
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
*/
CpuProfile* StopProfiling(Handle<String> title);
- /** Deprecated. Use StopProfiling instead. */
- V8_DEPRECATED("Use StopProfiling",
- const CpuProfile* StopCpuProfiling(Handle<String> title));
-
/**
* Tells the profiler whether the embedder is idle.
*/
@@ -272,10 +263,6 @@ class V8_EXPORT HeapGraphNode {
SnapshotObjectId GetId() const;
/** Returns node's own size, in bytes. */
- V8_DEPRECATED("Use GetShallowSize instead",
- int GetSelfSize() const);
-
- /** Returns node's own size, in bytes. */
size_t GetShallowSize() const;
/** Returns child nodes count of the node. */
@@ -326,12 +313,6 @@ class V8_EXPORT HeapSnapshot {
kJSON = 0 // See format description near 'Serialize' method.
};
- /** Returns heap snapshot UID (assigned by the profiler.) */
- unsigned GetUid() const;
-
- /** Returns heap snapshot title. */
- Handle<String> GetTitle() const;
-
/** Returns the root node of the heap graph. */
const HeapGraphNode* GetRoot() const;
@@ -380,7 +361,8 @@ class V8_EXPORT HeapSnapshot {
* Nodes reference strings, other nodes, and edges by their indexes
* in corresponding arrays.
*/
- void Serialize(OutputStream* stream, SerializationFormat format) const;
+ void Serialize(OutputStream* stream,
+ SerializationFormat format = kJSON) const;
};
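(Since kJSON is now a default argument, callers can omit the format. A minimal sketch, where MyStream is a hypothetical v8::OutputStream subclass supplied by the embedder:)

    MyStream stream;               // hypothetical OutputStream implementation
    snapshot->Serialize(&stream);  // format defaults to HeapSnapshot::kJSON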
@@ -465,10 +447,9 @@ class V8_EXPORT HeapProfiler {
};
/**
- * Takes a heap snapshot and returns it. Title may be an empty string.
+ * Takes a heap snapshot and returns it.
*/
const HeapSnapshot* TakeHeapSnapshot(
- Handle<String> title,
ActivityControl* control = NULL,
ObjectNameResolver* global_object_name_resolver = NULL);
@@ -490,17 +471,19 @@ class V8_EXPORT HeapProfiler {
* reports updates for all previous time intervals via the OutputStream
* object. Updates on each time interval are provided as a stream of the
* HeapStatsUpdate structure instances.
- * The return value of the function is the last seen heap object Id.
+ * If |timestamp_us| is supplied, timestamp of the new entry will be written
+ * into it. The return value of the function is the last seen heap object Id.
*
* StartTrackingHeapObjects must be called before the first call to this
* method.
*/
- SnapshotObjectId GetHeapStats(OutputStream* stream);
+ SnapshotObjectId GetHeapStats(OutputStream* stream,
+ int64_t* timestamp_us = NULL);
/**
* Stops tracking of heap objects population statistics, cleans up all
* collected data. StartHeapObjectsTracking must be called again prior to
- * calling PushHeapObjectsStats next time.
+ * calling GetHeapStats next time.
*/
void StopTrackingHeapObjects();
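(A short sketch of the extended GetHeapStats signature; heap_profiler and stream are assumed embedder objects. Passing the new out parameter yields the timestamp of the last recorded interval alongside the last seen object id:)

    int64_t timestamp_us = 0;
    v8::SnapshotObjectId last_id =
        heap_profiler->GetHeapStats(&stream, &timestamp_us);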
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index ca36b6c58b..b01d527754 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -12,7 +12,7 @@
/**
* Support for Persistent containers.
*
- * C++11 embedders can use STL containers with UniquePersistent values,
+ * C++11 embedders can use STL containers with Global values,
* but pre-C++11 does not support the required move semantic and hence
* may want these container classes.
*/
@@ -22,7 +22,10 @@ typedef uintptr_t PersistentContainerValue;
static const uintptr_t kPersistentContainerNotFound = 0;
enum PersistentContainerCallbackType {
kNotWeak,
- kWeak
+ // These correspond to v8::WeakCallbackType
+ kWeakWithParameter,
+ kWeakWithInternalFields,
+ kWeak = kWeakWithParameter // For backwards compatibility. Deprecate.
};
@@ -101,12 +104,12 @@ class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
return K();
}
static void DisposeCallbackData(WeakCallbackDataType* data) { }
- static void Dispose(Isolate* isolate, UniquePersistent<V> value, K key) { }
+ static void Dispose(Isolate* isolate, Global<V> value, K key) {}
};
template <typename K, typename V>
-class DefaultPhantomPersistentValueMapTraits : public StdMapTraits<K, V> {
+class DefaultGlobalMapTraits : public StdMapTraits<K, V> {
private:
template <typename T>
struct RemovePointer;
@@ -114,25 +117,26 @@ class DefaultPhantomPersistentValueMapTraits : public StdMapTraits<K, V> {
public:
// Weak callback & friends:
static const PersistentContainerCallbackType kCallbackType = kNotWeak;
- typedef PersistentValueMap<
- K, V, DefaultPhantomPersistentValueMapTraits<K, V> > MapType;
- typedef void PhantomCallbackDataType;
+ typedef PersistentValueMap<K, V, DefaultGlobalMapTraits<K, V> > MapType;
+ typedef void WeakCallbackInfoType;
- static PhantomCallbackDataType* PhantomCallbackParameter(MapType* map,
- const K& key,
- Local<V> value) {
- return NULL;
+ static WeakCallbackInfoType* WeakCallbackParameter(MapType* map, const K& key,
+ Local<V> value) {
+ return nullptr;
}
- static MapType* MapFromPhantomCallbackData(
- const PhantomCallbackData<PhantomCallbackDataType>& data) {
- return NULL;
+ static MapType* MapFromWeakCallbackInfo(
+ const WeakCallbackInfo<WeakCallbackInfoType>& data) {
+ return nullptr;
}
- static K KeyFromPhantomCallbackData(
- const PhantomCallbackData<PhantomCallbackDataType>& data) {
+ static K KeyFromWeakCallbackInfo(
+ const WeakCallbackInfo<WeakCallbackInfoType>& data) {
return K();
}
- static void DisposeCallbackData(PhantomCallbackDataType* data) {}
- static void Dispose(Isolate* isolate, UniquePersistent<V> value, K key) {}
+ static void DisposeCallbackData(WeakCallbackInfoType* data) {}
+ static void Dispose(Isolate* isolate, Global<V> value, K key) {}
+ static void DisposeWeak(Isolate* isolate,
+ const WeakCallbackInfo<WeakCallbackInfoType>& data,
+ K key) {}
private:
template <typename T>
@@ -143,8 +147,8 @@ class DefaultPhantomPersistentValueMapTraits : public StdMapTraits<K, V> {
/**
- * A map wrapper that allows using UniquePersistent as a mapped value.
- * C++11 embedders don't need this class, as they can use UniquePersistent
+ * A map wrapper that allows using Global as a mapped value.
+ * C++11 embedders don't need this class, as they can use Global
* directly in std containers.
*
* The map relies on a backing map, whose type and accessors are described
@@ -203,7 +207,7 @@ class PersistentValueMapBase {
/**
* Return value for key and remove it from the map.
*/
- UniquePersistent<V> Remove(const K& key) {
+ Global<V> Remove(const K& key) {
return Release(Traits::Remove(&impl_, key)).Pass();
}
@@ -255,7 +259,7 @@ class PersistentValueMapBase {
private:
friend class PersistentValueMapBase;
friend class PersistentValueMap<K, V, Traits>;
- friend class PhantomPersistentValueMap<K, V, Traits>;
+ friend class GlobalValueMap<K, V, Traits>;
explicit PersistentValueReference(PersistentContainerValue value)
: value_(value) { }
@@ -293,24 +297,23 @@ class PersistentValueMapBase {
return reinterpret_cast<V*>(v);
}
- static PersistentContainerValue ClearAndLeak(
- UniquePersistent<V>* persistent) {
+ static PersistentContainerValue ClearAndLeak(Global<V>* persistent) {
V* v = persistent->val_;
persistent->val_ = 0;
return reinterpret_cast<PersistentContainerValue>(v);
}
- static PersistentContainerValue Leak(UniquePersistent<V>* persistent) {
+ static PersistentContainerValue Leak(Global<V>* persistent) {
return reinterpret_cast<PersistentContainerValue>(persistent->val_);
}
/**
- * Return a container value as UniquePersistent and make sure the weak
+ * Return a container value as Global and make sure the weak
* callback is properly disposed of. All remove functionality should go
* through this.
*/
- static UniquePersistent<V> Release(PersistentContainerValue v) {
- UniquePersistent<V> p;
+ static Global<V> Release(PersistentContainerValue v) {
+ Global<V> p;
p.val_ = FromVal(v);
if (Traits::kCallbackType != kNotWeak && p.IsWeak()) {
Traits::DisposeCallbackData(
@@ -319,6 +322,12 @@ class PersistentValueMapBase {
return p.Pass();
}
+ void RemoveWeak(const K& key) {
+ Global<V> p;
+ p.val_ = FromVal(Traits::Remove(&impl_, key));
+ p.Reset();
+ }
+
private:
PersistentValueMapBase(PersistentValueMapBase&);
void operator=(PersistentValueMapBase&);
@@ -351,17 +360,17 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
/**
* Put value into map. Depending on Traits::kIsWeak, the value will be held
* by the map strongly or weakly.
- * Returns old value as UniquePersistent.
+ * Returns old value as Global.
*/
- UniquePersistent<V> Set(const K& key, Local<V> value) {
- UniquePersistent<V> persistent(this->isolate(), value);
+ Global<V> Set(const K& key, Local<V> value) {
+ Global<V> persistent(this->isolate(), value);
return SetUnique(key, &persistent);
}
/**
* Put value into map, like Set(const K&, Local<V>).
*/
- UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
+ Global<V> Set(const K& key, Global<V> value) {
return SetUnique(key, &value);
}
@@ -369,7 +378,7 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* Put the value into the map, and set the 'weak' callback when demanded
* by the Traits class.
*/
- UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
+ Global<V> SetUnique(const K& key, Global<V>* persistent) {
if (Traits::kCallbackType != kNotWeak) {
Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
@@ -384,8 +393,8 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* Put a value into the map and update the reference.
* Restrictions of GetReference apply here as well.
*/
- UniquePersistent<V> Set(const K& key, UniquePersistent<V> value,
- PersistentValueReference* reference) {
+ Global<V> Set(const K& key, Global<V> value,
+ PersistentValueReference* reference) {
*reference = this->Leak(&value);
return SetUnique(key, &value);
}
@@ -406,9 +415,9 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
template <typename K, typename V, typename Traits>
-class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
+class GlobalValueMap : public PersistentValueMapBase<K, V, Traits> {
public:
- explicit PhantomPersistentValueMap(Isolate* isolate)
+ explicit GlobalValueMap(Isolate* isolate)
: PersistentValueMapBase<K, V, Traits>(isolate) {}
typedef
@@ -418,17 +427,17 @@ class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
/**
* Put value into map. Depending on Traits::kIsWeak, the value will be held
* by the map strongly or weakly.
- * Returns old value as UniquePersistent.
+ * Returns old value as Global.
*/
- UniquePersistent<V> Set(const K& key, Local<V> value) {
- UniquePersistent<V> persistent(this->isolate(), value);
+ Global<V> Set(const K& key, Local<V> value) {
+ Global<V> persistent(this->isolate(), value);
return SetUnique(key, &persistent);
}
/**
* Put value into map, like Set(const K&, Local<V>).
*/
- UniquePersistent<V> Set(const K& key, UniquePersistent<V> value) {
+ Global<V> Set(const K& key, Global<V> value) {
return SetUnique(key, &value);
}
@@ -436,11 +445,16 @@ class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* Put the value into the map, and set the 'weak' callback when demanded
* by the Traits class.
*/
- UniquePersistent<V> SetUnique(const K& key, UniquePersistent<V>* persistent) {
+ Global<V> SetUnique(const K& key, Global<V>* persistent) {
if (Traits::kCallbackType != kNotWeak) {
+ WeakCallbackType callback_type =
+ Traits::kCallbackType == kWeakWithInternalFields
+ ? WeakCallbackType::kInternalFields
+ : WeakCallbackType::kParameter;
Local<V> value(Local<V>::New(this->isolate(), *persistent));
- persistent->template SetPhantom<typename Traits::WeakCallbackDataType>(
- Traits::WeakCallbackParameter(this, key, value), WeakCallback, 0, 1);
+ persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
+ Traits::WeakCallbackParameter(this, key, value), WeakCallback,
+ callback_type);
}
PersistentContainerValue old_value =
Traits::Set(this->impl(), key, this->ClearAndLeak(persistent));
@@ -451,33 +465,32 @@ class PhantomPersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
* Put a value into the map and update the reference.
* Restrictions of GetReference apply here as well.
*/
- UniquePersistent<V> Set(const K& key, UniquePersistent<V> value,
- PersistentValueReference* reference) {
+ Global<V> Set(const K& key, Global<V> value,
+ PersistentValueReference* reference) {
*reference = this->Leak(&value);
return SetUnique(key, &value);
}
private:
static void WeakCallback(
- const PhantomCallbackData<typename Traits::WeakCallbackDataType>& data) {
+ const WeakCallbackInfo<typename Traits::WeakCallbackDataType>& data) {
if (Traits::kCallbackType != kNotWeak) {
- PhantomPersistentValueMap<K, V, Traits>* persistentValueMap =
- Traits::MapFromPhantomCallbackData(data);
- K key = Traits::KeyFromPhantomCallbackData(data);
- Traits::Dispose(data.GetIsolate(), persistentValueMap->Remove(key).Pass(),
- key);
- Traits::DisposeCallbackData(data.GetParameter());
+ GlobalValueMap<K, V, Traits>* persistentValueMap =
+ Traits::MapFromWeakCallbackInfo(data);
+ K key = Traits::KeyFromWeakCallbackInfo(data);
+ persistentValueMap->RemoveWeak(key);
+ Traits::DisposeWeak(data.GetIsolate(), data, key);
}
}
};
/**
- * A map that uses UniquePersistent as value and std::map as the backing
+ * A map that uses Global as value and std::map as the backing
* implementation. Persistents are held non-weak.
*
* C++11 embedders don't need this class, as they can use
- * UniquePersistent directly in std containers.
+ * Global directly in std containers.
*/
template<typename K, typename V,
typename Traits = DefaultPersistentValueMapTraits<K, V> >
@@ -514,8 +527,8 @@ class DefaultPersistentValueVectorTraits {
/**
- * A vector wrapper that safely stores UniquePersistent values.
- * C++11 embedders don't need this class, as they can use UniquePersistent
+ * A vector wrapper that safely stores Global values.
+ * C++11 embedders don't need this class, as they can use Global
* directly in std containers.
*
* This class relies on a backing vector implementation, whose type and methods
@@ -536,14 +549,14 @@ class PersistentValueVector {
* Append a value to the vector.
*/
void Append(Local<V> value) {
- UniquePersistent<V> persistent(isolate_, value);
+ Global<V> persistent(isolate_, value);
Traits::Append(&impl_, ClearAndLeak(&persistent));
}
/**
* Append a persistent's value to the vector.
*/
- void Append(UniquePersistent<V> persistent) {
+ void Append(Global<V> persistent) {
Traits::Append(&impl_, ClearAndLeak(&persistent));
}
@@ -574,7 +587,7 @@ class PersistentValueVector {
void Clear() {
size_t length = Traits::Size(&impl_);
for (size_t i = 0; i < length; i++) {
- UniquePersistent<V> p;
+ Global<V> p;
p.val_ = FromVal(Traits::Get(&impl_, i));
}
Traits::Clear(&impl_);
@@ -589,8 +602,7 @@ class PersistentValueVector {
}
private:
- static PersistentContainerValue ClearAndLeak(
- UniquePersistent<V>* persistent) {
+ static PersistentContainerValue ClearAndLeak(Global<V>* persistent) {
V* v = persistent->val_;
persistent->val_ = 0;
return reinterpret_cast<PersistentContainerValue>(v);
@@ -606,4 +618,4 @@ class PersistentValueVector {
} // namespace v8
-#endif // V8_UTIL_H_
+#endif // V8_UTIL_H
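(The net effect of the v8-util.h changes is the UniquePersistent-to-Global rename plus weak-map support. As the header comments note, C++11 embedders can keep Global values in std containers directly; a sketch under that assumption, not part of the patch:)

    #include <map>
    #include <string>
    #include "v8.h"

    // Sketch: cache Global (ex-UniquePersistent) handles in a std::map.
    void Remember(v8::Isolate* isolate, v8::Local<v8::Value> value,
                  std::map<std::string, v8::Global<v8::Value>>* cache) {
      (*cache)["answer"] = v8::Global<v8::Value>(isolate, value);  // moved in
    }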
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index b20ccdb436..9cdb125921 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,8 +9,8 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 4
-#define V8_MINOR_VERSION 2
-#define V8_BUILD_NUMBER 77
+#define V8_MINOR_VERSION 3
+#define V8_BUILD_NUMBER 61
#define V8_PATCH_LEVEL 21
// Use 1 for candidates and 0 otherwise.
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index e325fd6d75..d3543f282f 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -81,6 +81,8 @@ class ImplementationUtilities;
class Int32;
class Integer;
class Isolate;
+template <class T>
+class Maybe;
class Name;
class Number;
class NumberObject;
@@ -93,6 +95,7 @@ class Promise;
class RawOperationDescriptor;
class Script;
class Signature;
+class StartupData;
class StackFrame;
class StackTrace;
class String;
@@ -105,17 +108,20 @@ class Utils;
class Value;
template <class T> class Handle;
template <class T> class Local;
+template <class T>
+class MaybeLocal;
template <class T> class Eternal;
template<class T> class NonCopyablePersistentTraits;
template<class T> class PersistentBase;
template<class T,
class M = NonCopyablePersistentTraits<T> > class Persistent;
-template<class T> class UniquePersistent;
+template <class T>
+class Global;
template<class K, class V, class T> class PersistentValueMap;
template <class K, class V, class T>
class PersistentValueMapBase;
template <class K, class V, class T>
-class PhantomPersistentValueMap;
+class GlobalValueMap;
template<class V, class T> class PersistentValueVector;
template<class T, class P> class WeakCallbackObject;
class FunctionTemplate;
@@ -141,20 +147,6 @@ template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
class GlobalHandles;
-
-template <typename T>
-class CallbackData {
- public:
- V8_INLINE v8::Isolate* GetIsolate() const { return isolate_; }
-
- explicit CallbackData(v8::Isolate* isolate, T* parameter)
- : isolate_(isolate), parameter_(parameter) {}
- V8_INLINE T* GetParameter() const { return parameter_; }
-
- private:
- v8::Isolate* isolate_;
- T* parameter_;
-};
}
@@ -321,6 +313,8 @@ template <class T> class Handle {
template<class F> friend class PersistentBase;
template<class F> friend class Handle;
template<class F> friend class Local;
+ template <class F>
+ friend class MaybeLocal;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
template<class F> friend class internal::CustomArguments;
@@ -398,6 +392,8 @@ template <class T> class Local : public Handle<T> {
template<class F, class M> friend class Persistent;
template<class F> friend class Handle;
template<class F> friend class Local;
+ template <class F>
+ friend class MaybeLocal;
template<class F> friend class FunctionCallbackInfo;
template<class F> friend class PropertyCallbackInfo;
friend class String;
@@ -415,6 +411,47 @@ template <class T> class Local : public Handle<T> {
};
+/**
+ * A MaybeLocal<> is a wrapper around Local<> that enforces a check whether
+ * the Local<> is empty before it can be used.
+ *
+ * If an API method returns a MaybeLocal<>, the API method can potentially fail
+ * either because an exception is thrown, or because an exception is pending,
+ * e.g. because a previous API call threw an exception that hasn't been caught
+ * yet, or because a TerminateExecution exception was thrown. In that case, an
+ * empty MaybeLocal is returned.
+ */
+template <class T>
+class MaybeLocal {
+ public:
+ V8_INLINE MaybeLocal() : val_(nullptr) {}
+ template <class S>
+ V8_INLINE MaybeLocal(Local<S> that)
+ : val_(reinterpret_cast<T*>(*that)) {
+ TYPE_CHECK(T, S);
+ }
+
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+
+ template <class S>
+ V8_WARN_UNUSED_RESULT V8_INLINE bool ToLocal(Local<S>* out) const {
+ out->val_ = IsEmpty() ? nullptr : this->val_;
+ return !IsEmpty();
+ }
+
+ // Will crash when checks are enabled if the MaybeLocal<> is empty.
+ V8_INLINE Local<T> ToLocalChecked();
+
+ template <class S>
+ V8_INLINE Local<S> FromMaybe(Local<S> default_value) const {
+ return IsEmpty() ? default_value : Local<S>(val_);
+ }
+
+ private:
+ T* val_;
+};
+
+
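(The three unwrapping styles the new class offers, as a sketch; obj, context, key and isolate are assumed embedder state:)

    v8::MaybeLocal<v8::Value> maybe = obj->Get(context, key);
    v8::Local<v8::Value> checked;
    if (maybe.ToLocal(&checked)) { /* use it */ }  // preferred: forces a check
    v8::Local<v8::Value> or_default =
        maybe.FromMaybe(v8::Undefined(isolate).As<v8::Value>());
    v8::Local<v8::Value> or_crash = maybe.ToLocalChecked();  // aborts if empty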
// Eternal handles are set-once handles that live for the life of the isolate.
template <class T> class Eternal {
public:
@@ -434,42 +471,79 @@ template <class T> class Eternal {
};
+static const int kInternalFieldsInWeakCallback = 2;
+
+
template <typename T>
-class PhantomCallbackData : public internal::CallbackData<T> {
+class WeakCallbackInfo {
public:
- typedef void (*Callback)(const PhantomCallbackData<T>& data);
+ typedef void (*Callback)(const WeakCallbackInfo<T>& data);
+
+ WeakCallbackInfo(Isolate* isolate, T* parameter,
+ void* internal_fields[kInternalFieldsInWeakCallback],
+ Callback* callback)
+ : isolate_(isolate), parameter_(parameter), callback_(callback) {
+ for (int i = 0; i < kInternalFieldsInWeakCallback; ++i) {
+ internal_fields_[i] = internal_fields[i];
+ }
+ }
+
+ V8_INLINE Isolate* GetIsolate() const { return isolate_; }
+ V8_INLINE T* GetParameter() const { return parameter_; }
+ V8_INLINE void* GetInternalField(int index) const;
- V8_INLINE void* GetInternalField1() const { return internal_field1_; }
- V8_INLINE void* GetInternalField2() const { return internal_field2_; }
+ V8_INLINE V8_DEPRECATE_SOON("use indexed version",
+ void* GetInternalField1()) const {
+ return internal_fields_[0];
+ }
+ V8_INLINE V8_DEPRECATE_SOON("use indexed version",
+ void* GetInternalField2()) const {
+ return internal_fields_[1];
+ }
- PhantomCallbackData(Isolate* isolate, T* parameter, void* internal_field1,
- void* internal_field2)
- : internal::CallbackData<T>(isolate, parameter),
- internal_field1_(internal_field1),
- internal_field2_(internal_field2) {}
+ bool IsFirstPass() const { return callback_ != nullptr; }
+
+ // When first called, the embedder MUST Reset() the Global which triggered the
+  // callback. The Global itself is unusable for anything else. No other
+  // V8 API calls may be made in the first callback. Should additional work be
+ // required, the embedder must set a second pass callback, which will be
+ // called after all the initial callbacks are processed.
+ // Calling SetSecondPassCallback on the second pass will immediately crash.
+ void SetSecondPassCallback(Callback callback) const { *callback_ = callback; }
private:
- void* internal_field1_;
- void* internal_field2_;
+ Isolate* isolate_;
+ T* parameter_;
+ Callback* callback_;
+ void* internal_fields_[kInternalFieldsInWeakCallback];
};
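A sketch of the two-pass contract described above (MyData, FirstPass and
SecondPass are hypothetical; registration uses the SetWeak overload taking a
WeakCallbackType, declared further below):

    struct MyData { v8::Global<v8::Object> handle; };

    void SecondPass(const v8::WeakCallbackInfo<MyData>& data) {
      delete data.GetParameter();  // arbitrary work is allowed here
    }

    void FirstPass(const v8::WeakCallbackInfo<MyData>& data) {
      data.GetParameter()->handle.Reset();     // mandatory in the first pass
      data.SetSecondPassCallback(SecondPass);  // opt in to a second pass
    }

    // Registration: my_data->handle.SetWeak(my_data, FirstPass,
    //                                       v8::WeakCallbackType::kParameter);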
template <class T, class P>
-class WeakCallbackData : public internal::CallbackData<P> {
+class WeakCallbackData {
public:
typedef void (*Callback)(const WeakCallbackData<T, P>& data);
+ WeakCallbackData(Isolate* isolate, P* parameter, Local<T> handle)
+ : isolate_(isolate), parameter_(parameter), handle_(handle) {}
+
+ V8_INLINE Isolate* GetIsolate() const { return isolate_; }
+ V8_INLINE P* GetParameter() const { return parameter_; }
V8_INLINE Local<T> GetValue() const { return handle_; }
private:
- friend class internal::GlobalHandles;
- WeakCallbackData(Isolate* isolate, P* parameter, Local<T> handle)
- : internal::CallbackData<P>(isolate, parameter), handle_(handle) {}
+ Isolate* isolate_;
+ P* parameter_;
Local<T> handle_;
};
-static const int kNoInternalFieldIndex = -1;
+// TODO(dcarney): delete this with WeakCallbackData
+template <class T>
+using PhantomCallbackData = WeakCallbackInfo<T>;
+
+
+enum class WeakCallbackType { kParameter, kInternalFields };
/**
@@ -542,15 +616,17 @@ template <class T> class PersistentBase {
* As always, GC-based finalization should *not* be relied upon for any
* critical form of resource management!
*/
- template<typename P>
- V8_INLINE void SetWeak(
- P* parameter,
- typename WeakCallbackData<T, P>::Callback callback);
+ template <typename P>
+ V8_INLINE V8_DEPRECATE_SOON(
+ "use WeakCallbackInfo version",
+ void SetWeak(P* parameter,
+ typename WeakCallbackData<T, P>::Callback callback));
- template<typename S, typename P>
- V8_INLINE void SetWeak(
- P* parameter,
- typename WeakCallbackData<S, P>::Callback callback);
+ template <typename S, typename P>
+ V8_INLINE V8_DEPRECATE_SOON(
+ "use WeakCallbackInfo version",
+ void SetWeak(P* parameter,
+ typename WeakCallbackData<S, P>::Callback callback));
// Phantom persistents work like weak persistents, except that the pointer to
// the object being collected is not available in the finalization callback.
@@ -559,10 +635,17 @@ template <class T> class PersistentBase {
// specify a parameter for the callback or the location of two internal
// fields in the dying object.
template <typename P>
- V8_INLINE void SetPhantom(P* parameter,
- typename PhantomCallbackData<P>::Callback callback,
- int internal_field_index1 = kNoInternalFieldIndex,
- int internal_field_index2 = kNoInternalFieldIndex);
+ V8_INLINE V8_DEPRECATE_SOON(
+ "use SetWeak",
+ void SetPhantom(P* parameter,
+ typename WeakCallbackInfo<P>::Callback callback,
+ int internal_field_index1 = -1,
+ int internal_field_index2 = -1));
+
+ template <typename P>
+ V8_INLINE void SetWeak(P* parameter,
+ typename WeakCallbackInfo<P>::Callback callback,
+ WeakCallbackType type);
template<typename P>
V8_INLINE P* ClearWeak();
@@ -614,7 +697,8 @@ template <class T> class PersistentBase {
template<class F> friend class Handle;
template<class F> friend class Local;
template<class F1, class F2> friend class Persistent;
- template<class F> friend class UniquePersistent;
+ template <class F>
+ friend class Global;
template<class F> friend class PersistentBase;
template<class F> friend class ReturnValue;
template <class F1, class F2, class F3>
@@ -623,8 +707,8 @@ template <class T> class PersistentBase {
friend class Object;
explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
- PersistentBase(PersistentBase& other); // NOLINT
- void operator=(PersistentBase&);
+ PersistentBase(PersistentBase& other) = delete; // NOLINT
+ void operator=(PersistentBase&) = delete;
V8_INLINE static T* New(Isolate* isolate, T* that);
T* val_;
@@ -770,72 +854,74 @@ template <class T, class M> class Persistent : public PersistentBase<T> {
*
* Note: Persistent class hierarchy is subject to future changes.
*/
-template<class T>
-class UniquePersistent : public PersistentBase<T> {
- struct RValue {
- V8_INLINE explicit RValue(UniquePersistent* obj) : object(obj) {}
- UniquePersistent* object;
- };
-
+template <class T>
+class Global : public PersistentBase<T> {
public:
/**
- * A UniquePersistent with no storage cell.
+ * A Global with no storage cell.
*/
- V8_INLINE UniquePersistent() : PersistentBase<T>(0) { }
+ V8_INLINE Global() : PersistentBase<T>(nullptr) {}
/**
- * Construct a UniquePersistent from a Handle.
+ * Construct a Global from a Handle.
* When the Handle is non-empty, a new storage cell is created
* pointing to the same object, and no flags are set.
*/
template <class S>
- V8_INLINE UniquePersistent(Isolate* isolate, Handle<S> that)
+ V8_INLINE Global(Isolate* isolate, Handle<S> that)
: PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
TYPE_CHECK(T, S);
}
/**
- * Construct a UniquePersistent from a PersistentBase.
+ * Construct a Global from a PersistentBase.
* When the Persistent is non-empty, a new storage cell is created
* pointing to the same object, and no flags are set.
*/
template <class S>
- V8_INLINE UniquePersistent(Isolate* isolate, const PersistentBase<S>& that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
+ V8_INLINE Global(Isolate* isolate, const PersistentBase<S>& that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
TYPE_CHECK(T, S);
}
/**
* Move constructor.
*/
- V8_INLINE UniquePersistent(RValue rvalue)
- : PersistentBase<T>(rvalue.object->val_) {
- rvalue.object->val_ = 0;
+ V8_INLINE Global(Global&& other) : PersistentBase<T>(other.val_) {
+ other.val_ = nullptr;
}
- V8_INLINE ~UniquePersistent() { this->Reset(); }
+ V8_INLINE ~Global() { this->Reset(); }
/**
* Move via assignment.
*/
- template<class S>
- V8_INLINE UniquePersistent& operator=(UniquePersistent<S> rhs) {
+ template <class S>
+ V8_INLINE Global& operator=(Global<S>&& rhs) {
TYPE_CHECK(T, S);
- this->Reset();
- this->val_ = rhs.val_;
- rhs.val_ = 0;
+ if (this != &rhs) {
+ this->Reset();
+ this->val_ = rhs.val_;
+ rhs.val_ = nullptr;
+ }
return *this;
}
/**
- * Cast operator for moves.
- */
- V8_INLINE operator RValue() { return RValue(this); }
- /**
* Pass allows returning uniques from functions, etc.
*/
- UniquePersistent Pass() { return UniquePersistent(RValue(this)); }
+ Global Pass() { return static_cast<Global&&>(*this); }
+
+ /*
+ * For compatibility with Chromium's base::Bind (base::Passed).
+ */
+ typedef void MoveOnlyTypeForCPP03;
private:
- UniquePersistent(UniquePersistent&);
- void operator=(UniquePersistent&);
+ Global(Global&) = delete;
+ void operator=(Global&) = delete;
};
+// UniquePersistent is an alias for Global for historical reasons.
+template <class T>
+using UniquePersistent = Global<T>;
+
+
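A sketch of the move-only semantics (hypothetical embedder code): a Global
transfers ownership on move and cannot be copied.

    v8::Global<v8::Object> MakeStrong(v8::Isolate* isolate,
                                      v8::Local<v8::Object> obj) {
      v8::Global<v8::Object> strong(isolate, obj);
      return strong;  // moved out via Global(Global&&)
    }

    // v8::Global<v8::Object> copy = strong;        // ill-formed: copy deleted
    // v8::Global<v8::Object> taken = strong.Pass(); // explicit C++03-style move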
/**
* A stack-allocated class that governs a number of local handles.
* After a handle scope has been created, all local handles will be
@@ -951,28 +1037,6 @@ class V8_EXPORT SealHandleScope {
};
-/**
- * A simple Maybe type, representing an object which may or may not have a
- * value.
- */
-template<class T>
-struct Maybe {
- Maybe() : has_value(false) {}
- explicit Maybe(T t) : has_value(true), value(t) {}
- Maybe(bool has, T t) : has_value(has), value(t) {}
-
- bool has_value;
- T value;
-};
-
-
-// Convenience wrapper.
-template <class T>
-inline Maybe<T> maybe(T t) {
- return Maybe<T>(t);
-}
-
-
// --- Special objects ---
@@ -996,13 +1060,15 @@ class ScriptOrigin {
Handle<Integer> resource_column_offset = Handle<Integer>(),
Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>(),
Handle<Integer> script_id = Handle<Integer>(),
- Handle<Boolean> resource_is_embedder_debug_script = Handle<Boolean>())
+ Handle<Boolean> resource_is_embedder_debug_script = Handle<Boolean>(),
+ Handle<Value> source_map_url = Handle<Value>())
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset),
resource_is_embedder_debug_script_(resource_is_embedder_debug_script),
resource_is_shared_cross_origin_(resource_is_shared_cross_origin),
- script_id_(script_id) {}
+ script_id_(script_id),
+ source_map_url_(source_map_url) {}
V8_INLINE Handle<Value> ResourceName() const;
V8_INLINE Handle<Integer> ResourceLineOffset() const;
V8_INLINE Handle<Integer> ResourceColumnOffset() const;
@@ -1012,6 +1078,7 @@ class ScriptOrigin {
V8_INLINE Handle<Boolean> ResourceIsEmbedderDebugScript() const;
V8_INLINE Handle<Boolean> ResourceIsSharedCrossOrigin() const;
V8_INLINE Handle<Integer> ScriptID() const;
+ V8_INLINE Handle<Value> SourceMapUrl() const;
private:
Handle<Value> resource_name_;
@@ -1020,6 +1087,7 @@ class ScriptOrigin {
Handle<Boolean> resource_is_embedder_debug_script_;
Handle<Boolean> resource_is_shared_cross_origin_;
Handle<Integer> script_id_;
+ Handle<Value> source_map_url_;
};
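A construction sketch for the extended origin (name and source_map_url are
hypothetical locals; the defaulted parameters keep the call aligned with the
constructor above):

    v8::ScriptOrigin origin(
        name,                       // resource_name
        v8::Handle<v8::Integer>(),  // resource_line_offset
        v8::Handle<v8::Integer>(),  // resource_column_offset
        v8::Handle<v8::Boolean>(),  // resource_is_shared_cross_origin
        v8::Handle<v8::Integer>(),  // script_id
        v8::Handle<v8::Boolean>(),  // resource_is_embedder_debug_script
        source_map_url);            // new trailing parameter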
@@ -1064,19 +1132,25 @@ class V8_EXPORT Script {
/**
* A shorthand for ScriptCompiler::Compile().
*/
- static Local<Script> Compile(Handle<String> source,
- ScriptOrigin* origin = NULL);
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<Script> Compile(Handle<String> source,
+ ScriptOrigin* origin = nullptr));
+ static MaybeLocal<Script> Compile(Local<Context> context,
+ Handle<String> source,
+ ScriptOrigin* origin = nullptr);
- // To be decprecated, use the Compile above.
- static Local<Script> Compile(Handle<String> source,
- Handle<String> file_name);
+ static Local<Script> V8_DEPRECATE_SOON("Use maybe version",
+ Compile(Handle<String> source,
+ Handle<String> file_name));
/**
* Runs the script returning the resulting value. It will be run in the
* context in which it was created (ScriptCompiler::CompileBound or
* UnboundScript::BindToCurrentContext()).
*/
- Local<Value> Run();
+ V8_DEPRECATE_SOON("Use maybe version", Local<Value> Run());
+ MaybeLocal<Value> Run(Local<Context> context);
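A migration sketch (context and source assumed in scope): the context-taking
overloads make failure explicit instead of handing back an empty Local.

    v8::Local<v8::Script> script;
    if (!v8::Script::Compile(context, source).ToLocal(&script)) return;
    v8::Local<v8::Value> result;
    if (!script->Run(context).ToLocal(&result)) return;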
/**
* Returns the corresponding context-unbound script.
@@ -1165,6 +1239,7 @@ class V8_EXPORT ScriptCompiler {
Handle<Integer> resource_column_offset;
Handle<Boolean> resource_is_embedder_debug_script;
Handle<Boolean> resource_is_shared_cross_origin;
+ Handle<Value> source_map_url;
// Cached data from previous compilation (if a kConsume*Cache flag is
// set), or hold newly generated cache data (kProduce*Cache flags) are
@@ -1263,7 +1338,11 @@ class V8_EXPORT ScriptCompiler {
* \return Compiled script object (context independent; for running it must be
* bound to a context).
*/
- static Local<UnboundScript> CompileUnbound(
+ static V8_DEPRECATE_SOON("Use maybe version",
+ Local<UnboundScript> CompileUnbound(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions));
+ static MaybeLocal<UnboundScript> CompileUnboundScript(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions);
@@ -1278,9 +1357,12 @@ class V8_EXPORT ScriptCompiler {
* when this function was called. When run it will always use this
* context.
*/
- static Local<Script> Compile(
- Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions);
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<Script> Compile(Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions));
+ static MaybeLocal<Script> Compile(Local<Context> context, Source* source,
+ CompileOptions options = kNoCompileOptions);
/**
* Returns a task which streams script data into V8, or NULL if the script
@@ -1304,9 +1386,15 @@ class V8_EXPORT ScriptCompiler {
* (ScriptStreamingTask has been run). V8 doesn't construct the source string
* during streaming, so the embedder needs to pass the full source here.
*/
- static Local<Script> Compile(Isolate* isolate, StreamedSource* source,
- Handle<String> full_source_string,
- const ScriptOrigin& origin);
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<Script> Compile(Isolate* isolate, StreamedSource* source,
+ Handle<String> full_source_string,
+ const ScriptOrigin& origin));
+ static MaybeLocal<Script> Compile(Local<Context> context,
+ StreamedSource* source,
+ Handle<String> full_source_string,
+ const ScriptOrigin& origin);
/**
* Return a version tag for CachedData for the current V8 version & flags.
@@ -1336,8 +1424,12 @@ class V8_EXPORT ScriptCompiler {
* TODO(adamk): Script is likely the wrong return value for this;
* should return some new Module type.
*/
- static Local<Script> CompileModule(
- Isolate* isolate, Source* source,
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<Script> CompileModule(Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions));
+ static MaybeLocal<Script> CompileModule(
+ Local<Context> context, Source* source,
CompileOptions options = kNoCompileOptions);
/**
@@ -1350,16 +1442,21 @@ class V8_EXPORT ScriptCompiler {
* It is possible to specify multiple context extensions (obj in the above
* example).
*/
- static Local<Function> CompileFunctionInContext(
- Isolate* isolate, Source* source, Local<Context> context,
- size_t arguments_count, Local<String> arguments[],
- size_t context_extension_count, Local<Object> context_extensions[]);
+ static V8_DEPRECATE_SOON("Use maybe version",
+ Local<Function> CompileFunctionInContext(
+ Isolate* isolate, Source* source,
+ Local<Context> context, size_t arguments_count,
+ Local<String> arguments[],
+ size_t context_extension_count,
+ Local<Object> context_extensions[]));
+ static MaybeLocal<Function> CompileFunctionInContext(
+ Local<Context> context, Source* source, size_t arguments_count,
+ Local<String> arguments[], size_t context_extension_count,
+ Local<Object> context_extensions[]);
private:
- static Local<UnboundScript> CompileUnboundInternal(Isolate* isolate,
- Source* source,
- CompileOptions options,
- bool is_module);
+ static MaybeLocal<UnboundScript> CompileUnboundInternal(
+ Isolate* isolate, Source* source, CompileOptions options, bool is_module);
};
@@ -1369,7 +1466,9 @@ class V8_EXPORT ScriptCompiler {
class V8_EXPORT Message {
public:
Local<String> Get() const;
- Local<String> GetSourceLine() const;
+
+ V8_DEPRECATE_SOON("Use maybe version", Local<String> GetSourceLine()) const;
+ MaybeLocal<String> GetSourceLine(Local<Context> context) const;
/**
* Returns the origin for the script from where the function causing the
@@ -1393,7 +1492,8 @@ class V8_EXPORT Message {
/**
* Returns the number, 1-based, of the line where the error occurred.
*/
- int GetLineNumber() const;
+ V8_DEPRECATE_SOON("Use maybe version", int GetLineNumber()) const;
+ Maybe<int> GetLineNumber(Local<Context> context) const;
/**
* Returns the index within the script of the first character where
@@ -1411,13 +1511,15 @@ class V8_EXPORT Message {
* Returns the index within the line of the first character where
* the error occurred.
*/
- int GetStartColumn() const;
+ V8_DEPRECATE_SOON("Use maybe version", int GetStartColumn()) const;
+ Maybe<int> GetStartColumn(Local<Context> context) const;
/**
* Returns the index within the line of the last character where
* the error occurred.
*/
- int GetEndColumn() const;
+ V8_DEPRECATE_SOON("Use maybe version", int GetEndColumn()) const;
+ Maybe<int> GetEndColumn(Local<Context> context) const;
/**
* Passes on the value set by the embedder when it fed the script from which
@@ -1584,7 +1686,9 @@ class V8_EXPORT JSON {
* \param json_string The string to parse.
* \return The corresponding value if successfully parsed.
*/
- static Local<Value> Parse(Local<String> json_string);
+ static V8_DEPRECATE_SOON("Use maybe version",
+ Local<Value> Parse(Local<String> json_string));
+ static MaybeLocal<Value> Parse(Isolate* isolate, Local<String> json_string);
};
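A sketch of the maybe variant (isolate and json_string assumed):

    v8::Local<v8::Value> parsed;
    if (!v8::JSON::Parse(isolate, json_string).ToLocal(&parsed)) {
      // invalid JSON, or an exception was already pending
    }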
@@ -1864,39 +1968,66 @@ class V8_EXPORT Value : public Data {
*/
bool IsDataView() const;
- Local<Boolean> ToBoolean(Isolate* isolate) const;
- Local<Number> ToNumber(Isolate* isolate) const;
- Local<String> ToString(Isolate* isolate) const;
- Local<String> ToDetailString(Isolate* isolate) const;
- Local<Object> ToObject(Isolate* isolate) const;
- Local<Integer> ToInteger(Isolate* isolate) const;
- Local<Uint32> ToUint32(Isolate* isolate) const;
- Local<Int32> ToInt32(Isolate* isolate) const;
-
- // TODO(dcarney): deprecate all these.
- inline Local<Boolean> ToBoolean() const;
- inline Local<Number> ToNumber() const;
- inline Local<String> ToString() const;
- inline Local<String> ToDetailString() const;
- inline Local<Object> ToObject() const;
- inline Local<Integer> ToInteger() const;
- inline Local<Uint32> ToUint32() const;
- inline Local<Int32> ToInt32() const;
+ MaybeLocal<Boolean> ToBoolean(Local<Context> context) const;
+ MaybeLocal<Number> ToNumber(Local<Context> context) const;
+ MaybeLocal<String> ToString(Local<Context> context) const;
+ MaybeLocal<String> ToDetailString(Local<Context> context) const;
+ MaybeLocal<Object> ToObject(Local<Context> context) const;
+ MaybeLocal<Integer> ToInteger(Local<Context> context) const;
+ MaybeLocal<Uint32> ToUint32(Local<Context> context) const;
+ MaybeLocal<Int32> ToInt32(Local<Context> context) const;
+
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Boolean> ToBoolean(Isolate* isolate)) const;
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Number> ToNumber(Isolate* isolate)) const;
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<String> ToString(Isolate* isolate)) const;
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<String> ToDetailString(Isolate* isolate)) const;
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Object> ToObject(Isolate* isolate)) const;
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Integer> ToInteger(Isolate* isolate)) const;
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Uint32> ToUint32(Isolate* isolate)) const;
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Int32> ToInt32(Isolate* isolate)) const;
+
+ inline V8_DEPRECATE_SOON("Use maybe version",
+ Local<Boolean> ToBoolean()) const;
+ inline V8_DEPRECATE_SOON("Use maybe version", Local<Number> ToNumber()) const;
+ inline V8_DEPRECATE_SOON("Use maybe version", Local<String> ToString()) const;
+ inline V8_DEPRECATE_SOON("Use maybe version",
+ Local<String> ToDetailString()) const;
+ inline V8_DEPRECATE_SOON("Use maybe version", Local<Object> ToObject()) const;
+ inline V8_DEPRECATE_SOON("Use maybe version",
+ Local<Integer> ToInteger()) const;
+ inline V8_DEPRECATE_SOON("Use maybe version", Local<Uint32> ToUint32()) const;
+ inline V8_DEPRECATE_SOON("Use maybe version", Local<Int32> ToInt32()) const;
/**
* Attempts to convert a string to an array index.
* Returns an empty handle if the conversion fails.
*/
- Local<Uint32> ToArrayIndex() const;
+ V8_DEPRECATE_SOON("Use maybe version", Local<Uint32> ToArrayIndex()) const;
+ MaybeLocal<Uint32> ToArrayIndex(Local<Context> context) const;
- bool BooleanValue() const;
- double NumberValue() const;
- int64_t IntegerValue() const;
- uint32_t Uint32Value() const;
- int32_t Int32Value() const;
+ Maybe<bool> BooleanValue(Local<Context> context) const;
+ Maybe<double> NumberValue(Local<Context> context) const;
+ Maybe<int64_t> IntegerValue(Local<Context> context) const;
+ Maybe<uint32_t> Uint32Value(Local<Context> context) const;
+ Maybe<int32_t> Int32Value(Local<Context> context) const;
+
+ V8_DEPRECATE_SOON("Use maybe version", bool BooleanValue()) const;
+ V8_DEPRECATE_SOON("Use maybe version", double NumberValue()) const;
+ V8_DEPRECATE_SOON("Use maybe version", int64_t IntegerValue()) const;
+ V8_DEPRECATE_SOON("Use maybe version", uint32_t Uint32Value()) const;
+ V8_DEPRECATE_SOON("Use maybe version", int32_t Int32Value()) const;
/** JS == */
- bool Equals(Handle<Value> that) const;
+ V8_DEPRECATE_SOON("Use maybe version", bool Equals(Handle<Value> that)) const;
+ Maybe<bool> Equals(Local<Context> context, Handle<Value> that) const;
bool StrictEquals(Handle<Value> that) const;
bool SameValue(Handle<Value> that) const;
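A conversion sketch under the new scheme (value, context and "other" assumed):
object coercions thread a MaybeLocal, scalar coercions a Maybe.

    v8::Local<v8::String> str;
    if (!value->ToString(context).ToLocal(&str)) return;
    int32_t n = value->Int32Value(context).FromMaybe(0);  // fallback on failure
    v8::Maybe<bool> eq = value->Equals(context, other);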
@@ -1925,7 +2056,10 @@ class V8_EXPORT Primitive : public Value { };
class V8_EXPORT Boolean : public Primitive {
public:
bool Value() const;
+ V8_INLINE static Boolean* Cast(v8::Value* obj);
V8_INLINE static Handle<Boolean> New(Isolate* isolate, bool value);
+ private:
+ static void CheckCast(v8::Value* obj);
};
@@ -1949,11 +2083,16 @@ class V8_EXPORT Name : public Primitive {
};
+enum class NewStringType { kNormal, kInternalized };
+
+
/**
* A JavaScript string value (ECMA-262, 4.3.17).
*/
class V8_EXPORT String : public Name {
public:
+ static const int kMaxLength = (1 << 28) - 16;
+
enum Encoding {
UNKNOWN_ENCODING = 0x1,
TWO_BYTE_ENCODING = 0x0,
@@ -2150,26 +2289,52 @@ class V8_EXPORT String : public Name {
V8_INLINE static String* Cast(v8::Value* obj);
- enum NewStringType { kNormalString, kInternalizedString };
+ // TODO(dcarney): remove with deprecation of New functions.
+ enum NewStringType {
+ kNormalString = static_cast<int>(v8::NewStringType::kNormal),
+ kInternalizedString = static_cast<int>(v8::NewStringType::kInternalized)
+ };
/** Allocates a new string from UTF-8 data.*/
- static Local<String> NewFromUtf8(Isolate* isolate, const char* data,
- NewStringType type = kNormalString,
- int length = -1);
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<String> NewFromUtf8(Isolate* isolate, const char* data,
+ NewStringType type = kNormalString,
+ int length = -1));
+
+ /** Allocates a new string from UTF-8 data. Only returns an empty value when
+ * length > kMaxLength. **/
+ static MaybeLocal<String> NewFromUtf8(Isolate* isolate, const char* data,
+ v8::NewStringType type,
+ int length = -1);
/** Allocates a new string from Latin-1 data.*/
- static Local<String> NewFromOneByte(
- Isolate* isolate,
- const uint8_t* data,
- NewStringType type = kNormalString,
- int length = -1);
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<String> NewFromOneByte(Isolate* isolate, const uint8_t* data,
+ NewStringType type = kNormalString,
+ int length = -1));
+
+ /** Allocates a new string from Latin-1 data. Only returns an empty value
+ * when length > kMaxLength. **/
+ static MaybeLocal<String> NewFromOneByte(Isolate* isolate,
+ const uint8_t* data,
+ v8::NewStringType type,
+ int length = -1);
/** Allocates a new string from UTF-16 data.*/
- static Local<String> NewFromTwoByte(
- Isolate* isolate,
- const uint16_t* data,
- NewStringType type = kNormalString,
- int length = -1);
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<String> NewFromTwoByte(Isolate* isolate, const uint16_t* data,
+ NewStringType type = kNormalString,
+ int length = -1));
+
+ /** Allocates a new string from UTF-16 data. Only returns an empty value when
+ * length > kMaxLength. **/
+ static MaybeLocal<String> NewFromTwoByte(Isolate* isolate,
+ const uint16_t* data,
+ v8::NewStringType type,
+ int length = -1);
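A sketch of the scoped-enum variant (isolate assumed); per the comments above,
the only failure mode is an over-long input:

    v8::Local<v8::String> s =
        v8::String::NewFromUtf8(isolate, "hello", v8::NewStringType::kNormal)
            .ToLocalChecked();  // safe: "hello" is far below kMaxLength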
/**
* Creates a new string by concatenating the left and the right strings
@@ -2185,8 +2350,12 @@ class V8_EXPORT String : public Name {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static Local<String> NewExternal(Isolate* isolate,
- ExternalStringResource* resource);
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<String> NewExternal(Isolate* isolate,
+ ExternalStringResource* resource));
+ static MaybeLocal<String> NewExternalTwoByte(
+ Isolate* isolate, ExternalStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -2207,8 +2376,12 @@ class V8_EXPORT String : public Name {
* should the underlying buffer be deallocated or modified except through the
* destructor of the external string resource.
*/
- static Local<String> NewExternal(Isolate* isolate,
- ExternalOneByteStringResource* resource);
+ static V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<String> NewExternal(Isolate* isolate,
+ ExternalOneByteStringResource* resource));
+ static MaybeLocal<String> NewExternalOneByte(
+ Isolate* isolate, ExternalOneByteStringResource* resource);
/**
* Associate an external string resource with this string by transforming it
@@ -2380,8 +2553,11 @@ class V8_EXPORT Integer : public Number {
class V8_EXPORT Int32 : public Integer {
public:
int32_t Value() const;
+ V8_INLINE static Int32* Cast(v8::Value* obj);
+
private:
Int32();
+ static void CheckCast(v8::Value* obj);
};
@@ -2391,8 +2567,11 @@ class V8_EXPORT Int32 : public Integer {
class V8_EXPORT Uint32 : public Integer {
public:
uint32_t Value() const;
+ V8_INLINE static Uint32* Cast(v8::Value* obj);
+
private:
Uint32();
+ static void CheckCast(v8::Value* obj);
};
@@ -2471,9 +2650,13 @@ enum AccessControl {
*/
class V8_EXPORT Object : public Value {
public:
- bool Set(Handle<Value> key, Handle<Value> value);
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool Set(Handle<Value> key, Handle<Value> value));
+ Maybe<bool> Set(Local<Context> context, Local<Value> key, Local<Value> value);
- bool Set(uint32_t index, Handle<Value> value);
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool Set(uint32_t index, Handle<Value> value));
+ Maybe<bool> Set(Local<Context> context, uint32_t index, Local<Value> value);
// Sets an own property on this object bypassing interceptors and
// overriding accessors or read-only properties.
@@ -2483,46 +2666,68 @@ class V8_EXPORT Object : public Value {
// will only be returned if the interceptor doesn't return a value.
//
// Note also that this only works for named properties.
- bool ForceSet(Handle<Value> key,
- Handle<Value> value,
- PropertyAttribute attribs = None);
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool ForceSet(Handle<Value> key, Handle<Value> value,
+ PropertyAttribute attribs = None));
+ Maybe<bool> ForceSet(Local<Context> context, Local<Value> key,
+ Local<Value> value, PropertyAttribute attribs = None);
- Local<Value> Get(Handle<Value> key);
+ V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(Handle<Value> key));
+ MaybeLocal<Value> Get(Local<Context> context, Local<Value> key);
- Local<Value> Get(uint32_t index);
+ V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(uint32_t index));
+ MaybeLocal<Value> Get(Local<Context> context, uint32_t index);
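A property-access sketch with the context-taking overloads (obj, context, key
and value assumed):

    if (obj->Set(context, key, value).IsNothing()) return;  // exception pending
    v8::Local<v8::Value> got;
    if (!obj->Get(context, key).ToLocal(&got)) return;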
/**
* Gets the property attributes of a property which can be None or
* any combination of ReadOnly, DontEnum and DontDelete. Returns
* None when the property doesn't exist.
*/
- PropertyAttribute GetPropertyAttributes(Handle<Value> key);
+ V8_DEPRECATE_SOON("Use maybe version",
+ PropertyAttribute GetPropertyAttributes(Handle<Value> key));
+ Maybe<PropertyAttribute> GetPropertyAttributes(Local<Context> context,
+ Local<Value> key);
/**
* Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3.
*/
- Local<Value> GetOwnPropertyDescriptor(Local<String> key);
-
- bool Has(Handle<Value> key);
-
- bool Delete(Handle<Value> key);
-
- bool Has(uint32_t index);
-
- bool Delete(uint32_t index);
-
- bool SetAccessor(Handle<String> name,
- AccessorGetterCallback getter,
- AccessorSetterCallback setter = 0,
- Handle<Value> data = Handle<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None);
- bool SetAccessor(Handle<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = 0,
- Handle<Value> data = Handle<Value>(),
- AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Value> GetOwnPropertyDescriptor(Local<String> key));
+ MaybeLocal<Value> GetOwnPropertyDescriptor(Local<Context> context,
+ Local<String> key);
+
+ V8_DEPRECATE_SOON("Use maybe version", bool Has(Handle<Value> key));
+ Maybe<bool> Has(Local<Context> context, Local<Value> key);
+
+ V8_DEPRECATE_SOON("Use maybe version", bool Delete(Handle<Value> key));
+ Maybe<bool> Delete(Local<Context> context, Local<Value> key);
+
+ V8_DEPRECATE_SOON("Use maybe version", bool Has(uint32_t index));
+ Maybe<bool> Has(Local<Context> context, uint32_t index);
+
+ V8_DEPRECATE_SOON("Use maybe version", bool Delete(uint32_t index));
+ Maybe<bool> Delete(Local<Context> context, uint32_t index);
+
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool SetAccessor(Handle<String> name,
+ AccessorGetterCallback getter,
+ AccessorSetterCallback setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None));
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool SetAccessor(Handle<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = 0,
+ Handle<Value> data = Handle<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None));
+ Maybe<bool> SetAccessor(Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = 0,
+ MaybeLocal<Value> data = MaybeLocal<Value>(),
+ AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None);
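An accessor sketch (NameGetter is an assumed AccessorNameGetterCallback; obj,
context and name assumed):

    v8::Maybe<bool> installed = obj->SetAccessor(context, name, NameGetter);
    if (installed.IsNothing() || !installed.FromJust()) {
      // installation failed or threw
    }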
void SetAccessorProperty(Local<Name> name,
Local<Function> getter,
@@ -2536,6 +2741,7 @@ class V8_EXPORT Object : public Value {
* Note: Private properties are inherited. Do not rely on this, since it may
* change.
*/
+ // TODO(dcarney): convert these or remove?
bool HasPrivate(Handle<Private> key);
bool SetPrivate(Handle<Private> key, Handle<Value> value);
bool DeletePrivate(Handle<Private> key);
@@ -2547,14 +2753,16 @@ class V8_EXPORT Object : public Value {
* array returned by this method contains the same values as would
* be enumerated by a for-in statement over this object.
*/
- Local<Array> GetPropertyNames();
+ V8_DEPRECATE_SOON("Use maybe version", Local<Array> GetPropertyNames());
+ MaybeLocal<Array> GetPropertyNames(Local<Context> context);
/**
* This function has the same functionality as GetPropertyNames but
* the returned array doesn't contain the names of properties from
* prototype objects.
*/
- Local<Array> GetOwnPropertyNames();
+ V8_DEPRECATE_SOON("Use maybe version", Local<Array> GetOwnPropertyNames());
+ MaybeLocal<Array> GetOwnPropertyNames(Local<Context> context);
/**
* Get the prototype object. This does not skip objects marked to
@@ -2568,7 +2776,9 @@ class V8_EXPORT Object : public Value {
* be skipped by __proto__ and it does not consult the security
* handler.
*/
- bool SetPrototype(Handle<Value> prototype);
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool SetPrototype(Handle<Value> prototype));
+ Maybe<bool> SetPrototype(Local<Context> context, Local<Value> prototype);
/**
* Finds an instance of the given function template in the prototype
@@ -2581,7 +2791,8 @@ class V8_EXPORT Object : public Value {
* This is different from Value::ToString() that may call
* user-defined toString function. This one does not.
*/
- Local<String> ObjectProtoToString();
+ V8_DEPRECATE_SOON("Use maybe version", Local<String> ObjectProtoToString());
+ MaybeLocal<String> ObjectProtoToString(Local<Context> context);
/**
* Returns the name of the function invoked as a constructor for this object.
@@ -2624,23 +2835,62 @@ class V8_EXPORT Object : public Value {
void SetAlignedPointerInInternalField(int index, void* value);
// Testers for local properties.
- bool HasOwnProperty(Handle<String> key);
- bool HasRealNamedProperty(Handle<String> key);
- bool HasRealIndexedProperty(uint32_t index);
- bool HasRealNamedCallbackProperty(Handle<String> key);
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool HasOwnProperty(Handle<String> key));
+ Maybe<bool> HasOwnProperty(Local<Context> context, Local<Name> key);
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool HasRealNamedProperty(Handle<String> key));
+ Maybe<bool> HasRealNamedProperty(Local<Context> context, Local<Name> key);
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool HasRealIndexedProperty(uint32_t index));
+ Maybe<bool> HasRealIndexedProperty(Local<Context> context, uint32_t index);
+ V8_DEPRECATE_SOON("Use maybe version",
+ bool HasRealNamedCallbackProperty(Handle<String> key));
+ Maybe<bool> HasRealNamedCallbackProperty(Local<Context> context,
+ Local<Name> key);
/**
* If result.IsEmpty() no real property was located in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key);
+ V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Local<Value> GetRealNamedPropertyInPrototypeChain(Handle<String> key));
+ MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(Local<Context> context,
+ Local<Name> key);
+
+ /**
+ * Gets the property attributes of a real property in the prototype chain,
+ * which can be None or any combination of ReadOnly, DontEnum and DontDelete.
+ * Interceptors in the prototype chain are not called.
+ */
+ V8_DEPRECATE_SOON(
+ "Use maybe version",
+ Maybe<PropertyAttribute> GetRealNamedPropertyAttributesInPrototypeChain(
+ Handle<String> key));
+ Maybe<PropertyAttribute> GetRealNamedPropertyAttributesInPrototypeChain(
+ Local<Context> context, Local<Name> key);
/**
* If result.IsEmpty() no real property was located on the object or
* in the prototype chain.
* This means interceptors in the prototype chain are not called.
*/
- Local<Value> GetRealNamedProperty(Handle<String> key);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Value> GetRealNamedProperty(Handle<String> key));
+ MaybeLocal<Value> GetRealNamedProperty(Local<Context> context,
+ Local<Name> key);
+
+ /**
+ * Gets the property attributes of a real property which can be
+ * None or any combination of ReadOnly, DontEnum and DontDelete.
+ * Interceptors in the prototype chain are not called.
+ */
+ V8_DEPRECATE_SOON("Use maybe version",
+ Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
+ Handle<String> key));
+ Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
+ Local<Context> context, Local<Name> key);
/** Tests for a named lookup interceptor.*/
bool HasNamedLookupInterceptor();
@@ -2653,7 +2903,7 @@ class V8_EXPORT Object : public Value {
* a template that has access check callbacks. If an object has no
* access check info, the object cannot be accessed by anyone.
*/
- void TurnOnAccessCheck();
+ V8_DEPRECATE_SOON("No alternative", void TurnOnAccessCheck());
/**
* Returns the identity hash for this object. The current implementation
@@ -2670,6 +2920,7 @@ class V8_EXPORT Object : public Value {
* C++ API. Hidden properties introduced by V8 internally (for example the
* identity hash) are prefixed with "v8::".
*/
+ // TODO(dcarney): convert these to take a isolate and optionally bailout?
bool SetHiddenValue(Handle<String> key, Handle<Value> value);
Local<Value> GetHiddenValue(Handle<String> key);
bool DeleteHiddenValue(Handle<String> key);
@@ -2678,6 +2929,7 @@ class V8_EXPORT Object : public Value {
* Clone this object with a fast but shallow copy. Values will point
* to the same values as the original object.
*/
+ // TODO(dcarney): take an isolate and optionally bail out?
Local<Object> Clone();
/**
@@ -2723,21 +2975,27 @@ class V8_EXPORT Object : public Value {
* Call an Object as a function if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
*/
- Local<Value> CallAsFunction(Handle<Value> recv,
- int argc,
- Handle<Value> argv[]);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Value> CallAsFunction(Handle<Value> recv, int argc,
+ Handle<Value> argv[]));
+ MaybeLocal<Value> CallAsFunction(Local<Context> context, Handle<Value> recv,
+ int argc, Handle<Value> argv[]);
/**
* Call an Object as a constructor if a callback is set by the
* ObjectTemplate::SetCallAsFunctionHandler method.
* Note: This method behaves like the Function::NewInstance method.
*/
- Local<Value> CallAsConstructor(int argc, Handle<Value> argv[]);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Value> CallAsConstructor(int argc,
+ Handle<Value> argv[]));
+ MaybeLocal<Value> CallAsConstructor(Local<Context> context, int argc,
+ Local<Value> argv[]);
/**
   * Returns the isolate to which the Object belongs.
*/
- Isolate* GetIsolate();
+ V8_DEPRECATE_SOON("Keep track of isolate correctly", Isolate* GetIsolate());
static Local<Object> New(Isolate* isolate);
@@ -2762,7 +3020,9 @@ class V8_EXPORT Array : public Object {
* Clones an element at index |index|. Returns an empty
* handle if cloning fails (for any reason).
*/
- Local<Object> CloneElementAt(uint32_t index);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Object> CloneElementAt(uint32_t index));
+ MaybeLocal<Object> CloneElementAt(Local<Context> context, uint32_t index);
/**
* Creates a JavaScript array with the given length. If the length
@@ -2907,9 +3167,23 @@ class V8_EXPORT Function : public Object {
Local<Value> data = Local<Value>(),
int length = 0);
- Local<Object> NewInstance() const;
- Local<Object> NewInstance(int argc, Handle<Value> argv[]) const;
- Local<Value> Call(Handle<Value> recv, int argc, Handle<Value> argv[]);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Object> NewInstance(int argc,
+ Handle<Value> argv[])) const;
+ MaybeLocal<Object> NewInstance(Local<Context> context, int argc,
+ Handle<Value> argv[]) const;
+
+ V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance()) const;
+ MaybeLocal<Object> NewInstance(Local<Context> context) const {
+ return NewInstance(context, 0, nullptr);
+ }
+
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Value> Call(Handle<Value> recv, int argc,
+ Handle<Value> argv[]));
+ MaybeLocal<Value> Call(Local<Context> context, Handle<Value> recv, int argc,
+ Handle<Value> argv[]);
+
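A call sketch with the context-taking variants (fn and context assumed):

    v8::Local<v8::Object> instance;
    if (!fn->NewInstance(context).ToLocal(&instance)) return;
    v8::MaybeLocal<v8::Value> r = fn->Call(context, instance, 0, nullptr);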
void SetName(Handle<String> name);
Handle<Value> GetName() const;
@@ -2975,7 +3249,9 @@ class V8_EXPORT Promise : public Object {
/**
* Create a new resolver, along with an associated promise in pending state.
*/
- static Local<Resolver> New(Isolate* isolate);
+ static V8_DEPRECATE_SOON("Use maybe version",
+ Local<Resolver> New(Isolate* isolate));
+ static MaybeLocal<Resolver> New(Local<Context> context);
/**
* Extract the associated promise.
@@ -2986,8 +3262,11 @@ class V8_EXPORT Promise : public Object {
* Resolve/reject the associated promise with a given value.
* Ignored if the promise is no longer pending.
*/
- void Resolve(Handle<Value> value);
- void Reject(Handle<Value> value);
+ V8_DEPRECATE_SOON("Use maybe version", void Resolve(Handle<Value> value));
+ Maybe<bool> Resolve(Local<Context> context, Handle<Value> value);
+
+ V8_DEPRECATE_SOON("Use maybe version", void Reject(Handle<Value> value));
+ Maybe<bool> Reject(Local<Context> context, Handle<Value> value);
V8_INLINE static Resolver* Cast(Value* obj);
@@ -3002,9 +3281,17 @@ class V8_EXPORT Promise : public Object {
* an argument. If the promise is already resolved/rejected, the handler is
* invoked at the end of turn.
*/
- Local<Promise> Chain(Handle<Function> handler);
- Local<Promise> Catch(Handle<Function> handler);
- Local<Promise> Then(Handle<Function> handler);
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Promise> Chain(Handle<Function> handler));
+ MaybeLocal<Promise> Chain(Local<Context> context, Handle<Function> handler);
+
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Promise> Catch(Handle<Function> handler));
+ MaybeLocal<Promise> Catch(Local<Context> context, Handle<Function> handler);
+
+ V8_DEPRECATE_SOON("Use maybe version",
+ Local<Promise> Then(Handle<Function> handler));
+ MaybeLocal<Promise> Then(Local<Context> context, Handle<Function> handler);
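A resolver sketch (context and value assumed); settlement is now fallible and
reports through Maybe<bool>:

    v8::Local<v8::Promise::Resolver> resolver;
    if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
    v8::Local<v8::Promise> promise = resolver->GetPromise();
    resolver->Resolve(context, value);  // ignored if no longer pending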
/**
* Returns true if the promise has at least one derived promise, and
@@ -3025,6 +3312,10 @@ class V8_EXPORT Promise : public Object {
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
#endif
+
+enum class ArrayBufferCreationMode { kInternalized, kExternalized };
+
+
/**
* An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
* This API is experimental and may change significantly.
@@ -3100,12 +3391,13 @@ class V8_EXPORT ArrayBuffer : public Object {
/**
* Create a new ArrayBuffer over an existing memory block.
- * The created array buffer is immediately in externalized state.
+ * By default, the created array buffer is immediately in externalized state.
* The memory block will not be reclaimed when a created ArrayBuffer
* is garbage-collected.
*/
- static Local<ArrayBuffer> New(Isolate* isolate, void* data,
- size_t byte_length);
+ static Local<ArrayBuffer> New(
+ Isolate* isolate, void* data, size_t byte_length,
+ ArrayBufferCreationMode mode = ArrayBufferCreationMode::kExternalized);
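A creation sketch over embedder memory (data and isolate assumed): passing
kInternalized instead of the default hands the block to V8 (the ownership
transfer is an assumption based on the enum name and the Allocate guarantee
below).

    v8::Local<v8::ArrayBuffer> ab = v8::ArrayBuffer::New(
        isolate, data, 1024,
        v8::ArrayBufferCreationMode::kInternalized);  // assumed: V8 frees data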
/**
   * Returns true if ArrayBuffer is externalized, that is, does not
@@ -3137,6 +3429,18 @@ class V8_EXPORT ArrayBuffer : public Object {
*/
Contents Externalize();
+ /**
+ * Get a pointer to the ArrayBuffer's underlying memory block without
+ * externalizing it. If the ArrayBuffer is not externalized, this pointer
+ * will become invalid as soon as the ArrayBuffer is garbage collected.
+ *
+ * The embedder should make sure to hold a strong reference to the
+ * ArrayBuffer while accessing this pointer.
+ *
+ * The memory block is guaranteed to be allocated with |Allocator::Allocate|.
+ */
+ Contents GetContents();
+
V8_INLINE static ArrayBuffer* Cast(Value* obj);
static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
@@ -3174,6 +3478,23 @@ class V8_EXPORT ArrayBufferView : public Object {
*/
size_t ByteLength();
+ /**
+ * Copy the contents of the ArrayBufferView's buffer to embedder-defined
+ * memory, without the additional overhead that calling
+ * ArrayBufferView::Buffer might incur.
+ *
+ * Will write at most min(|byte_length|, ByteLength) bytes starting at
+ * ByteOffset of the underlying buffer to the memory starting at |dest|.
+ * Returns the number of bytes actually written.
+ */
+ size_t CopyContents(void* dest, size_t byte_length);
+
+ /**
+ * Returns true if ArrayBufferView's backing ArrayBuffer has already been
+ * allocated.
+ */
+ bool HasBuffer() const;
+
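A sketch of the copy path described above (view assumed):

    uint8_t scratch[64];
    size_t copied = view->CopyContents(scratch, sizeof(scratch));
    // copied == min(sizeof(scratch), view->ByteLength())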
V8_INLINE static ArrayBufferView* Cast(Value* obj);
static const int kInternalFieldCount =
@@ -3370,7 +3691,9 @@ class V8_EXPORT DataView : public ArrayBufferView {
*/
class V8_EXPORT Date : public Object {
public:
- static Local<Value> New(Isolate* isolate, double time);
+ static V8_DEPRECATE_SOON("Use maybe version.",
+ Local<Value> New(Isolate* isolate, double time));
+ static MaybeLocal<Value> New(Local<Context> context, double time);
/**
* A specialization of Value::NumberValue that is more efficient
@@ -3491,7 +3814,11 @@ class V8_EXPORT RegExp : public Object {
* static_cast<RegExp::Flags>(kGlobal | kMultiline))
* is equivalent to evaluating "/foo/gm".
*/
- static Local<RegExp> New(Handle<String> pattern, Flags flags);
+ static V8_DEPRECATE_SOON("Use maybe version",
+ Local<RegExp> New(Handle<String> pattern,
+ Flags flags));
+ static MaybeLocal<RegExp> New(Local<Context> context, Handle<String> pattern,
+ Flags flags);
/**
* Returns the value of the source property: a string representing
@@ -3875,7 +4202,8 @@ class V8_EXPORT FunctionTemplate : public Template {
int length = 0);
/** Returns the unique function instance in the current execution context.*/
- Local<Function> GetFunction();
+ V8_DEPRECATE_SOON("Use maybe version", Local<Function> GetFunction());
+ MaybeLocal<Function> GetFunction(Local<Context> context);
/**
* Set the call-handler callback for a FunctionTemplate. This
@@ -3907,6 +4235,13 @@ class V8_EXPORT FunctionTemplate : public Template {
*/
void SetClassName(Handle<String> name);
+
+ /**
+ * When set to true, no access check will be performed on the receiver of a
+ * function call. Currently defaults to true, but this is subject to change.
+ */
+ void SetAcceptAnyReceiver(bool value);
+
/**
* Determines whether the __proto__ accessor ignores instances of
* the function template. If instances of the function template are
@@ -3946,7 +4281,17 @@ class V8_EXPORT FunctionTemplate : public Template {
};
-enum class PropertyHandlerFlags { kNone = 0, kAllCanRead = 1 };
+enum class PropertyHandlerFlags {
+ kNone = 0,
+ // See ALL_CAN_READ above.
+ kAllCanRead = 1,
+ // Will not call into interceptor for properties on the receiver or prototype
+ // chain. Currently only valid for named interceptors.
+ kNonMasking = 1 << 1,
+ // Will not call into interceptor for symbol lookup. Only meaningful for
+ // named interceptors.
+ kOnlyInterceptStrings = 1 << 2,
+};
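Since the enum is scoped, combining flags takes explicit casts; a sketch:

    auto flags = static_cast<v8::PropertyHandlerFlags>(
        static_cast<int>(v8::PropertyHandlerFlags::kNonMasking) |
        static_cast<int>(v8::PropertyHandlerFlags::kOnlyInterceptStrings));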
struct NamedPropertyHandlerConfiguration {
@@ -4015,11 +4360,11 @@ class V8_EXPORT ObjectTemplate : public Template {
public:
/** Creates an ObjectTemplate. */
static Local<ObjectTemplate> New(Isolate* isolate);
- // Will be deprecated soon.
- static Local<ObjectTemplate> New();
+ static V8_DEPRECATE_SOON("Use isolate version", Local<ObjectTemplate> New());
/** Creates a new instance of this template.*/
- Local<Object> NewInstance();
+ V8_DEPRECATE_SOON("Use maybe version", Local<Object> NewInstance());
+ MaybeLocal<Object> NewInstance(Local<Context> context);
/**
* Sets an accessor on the object template.
@@ -4406,8 +4751,7 @@ enum ObjectSpace {
kObjectSpaceCodeSpace = 1 << 3,
kObjectSpaceMapSpace = 1 << 4,
kObjectSpaceCellSpace = 1 << 5,
- kObjectSpacePropertyCellSpace = 1 << 6,
- kObjectSpaceLoSpace = 1 << 7,
+ kObjectSpaceLoSpace = 1 << 6,
kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace |
kObjectSpaceMapSpace | kObjectSpaceLoSpace
@@ -4665,7 +5009,10 @@ class V8_EXPORT Isolate {
CreateParams()
: entry_hook(NULL),
code_event_handler(NULL),
- enable_serializer(false) {}
+ snapshot_blob(NULL),
+ counter_lookup_callback(NULL),
+ create_histogram_callback(NULL),
+ add_histogram_sample_callback(NULL) {}
/**
* The optional entry_hook allows the host application to provide the
@@ -4688,9 +5035,25 @@ class V8_EXPORT Isolate {
ResourceConstraints constraints;
/**
- * This flag currently renders the Isolate unusable.
+ * Explicitly specify a startup snapshot blob. The embedder owns the blob.
+ */
+ StartupData* snapshot_blob;
+
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * statistics counters.
*/
- bool enable_serializer;
+ CounterLookupCallback counter_lookup_callback;
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * histograms. The CreateHistogram function returns a
+ * histogram which will later be passed to the AddHistogramSample
+ * function.
+ */
+ CreateHistogramCallback create_histogram_callback;
+ AddHistogramSampleCallback add_histogram_sample_callback;
};
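A setup sketch (my_blob and LookupCounter are hypothetical, and the
CreateParams-taking Isolate::New is assumed): the new fields are plugged in
before the isolate is created.

    v8::Isolate::CreateParams params;
    params.snapshot_blob = &my_blob;                 // embedder-owned StartupData
    params.counter_lookup_callback = LookupCounter;
    v8::Isolate* isolate = v8::Isolate::New(params);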
@@ -4789,6 +5152,7 @@ class V8_EXPORT Isolate {
enum UseCounterFeature {
kUseAsm = 0,
kBreakIterator = 1,
+ kLegacyConst = 2,
kUseCounterFeatureCount // This enum value must be last.
};
@@ -5056,13 +5420,6 @@ class V8_EXPORT Isolate {
void RequestInterrupt(InterruptCallback callback, void* data);
/**
- * Clear interrupt request created by |RequestInterrupt|.
- * Can be called from another thread without acquiring a |Locker|.
- */
- V8_DEPRECATED("There's no way to clear interrupts in flight.",
- void ClearInterrupt());
-
- /**
* Request garbage collection in this Isolate. It is only valid to call this
* function if --expose_gc was specified.
*
@@ -5157,7 +5514,9 @@ class V8_EXPORT Isolate {
*
* The idle_time_in_ms argument specifies the time V8 has to perform
* garbage collection. There is no guarantee that the actual work will be
- * done within the time limit.
+ * done within the time limit. This variant is deprecated and will be removed
+ * in the future.
+ *
* The deadline_in_seconds argument specifies the deadline V8 has to finish
* garbage collection work. deadline_in_seconds is compared with
* MonotonicallyIncreasingTime() and should be based on the same timebase as
@@ -5367,16 +5726,17 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
class V8_EXPORT V8 {
public:
/** Set the callback to invoke in case of fatal errors. */
- // TODO(dcarney): deprecate this.
- V8_INLINE static void SetFatalErrorHandler(FatalErrorCallback that);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void SetFatalErrorHandler(FatalErrorCallback that));
/**
* Set the callback to invoke to check if code generation from
* strings should be allowed.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void SetAllowCodeGenerationFromStringsCallback(
- AllowCodeGenerationFromStringsCallback that);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version", void SetAllowCodeGenerationFromStringsCallback(
+ AllowCodeGenerationFromStringsCallback that));
/**
* Set allocator to use for ArrayBuffer memory.
@@ -5390,8 +5750,7 @@ class V8_EXPORT V8 {
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static bool IsDead();
+ V8_INLINE static V8_DEPRECATE_SOON("no alternative", bool IsDead());
/**
* Hand startup data to V8, in case the embedder has chosen to build
@@ -5416,7 +5775,7 @@ class V8_EXPORT V8 {
* Returns { NULL, 0 } on failure.
* The caller owns the data array in the return value.
*/
- static StartupData CreateSnapshotDataBlob(char* custom_source = NULL);
+ static StartupData CreateSnapshotDataBlob(const char* custom_source = NULL);
/**
* Adds a message listener.
@@ -5427,24 +5786,26 @@ class V8_EXPORT V8 {
* If data is specified, it will be passed to the callback when it is called.
* Otherwise, the exception object will be passed to the callback instead.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static bool AddMessageListener(
- MessageCallback that, Handle<Value> data = Handle<Value>());
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ bool AddMessageListener(MessageCallback that,
+ Handle<Value> data = Handle<Value>()));
/**
* Remove all message listeners from the specified callback function.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void RemoveMessageListeners(MessageCallback that);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version", void RemoveMessageListeners(MessageCallback that));
/**
* Tells V8 to capture current stack trace when uncaught exception occurs
* and report it to the message listeners. The option is off by default.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void SetCaptureStackTraceForUncaughtExceptions(
- bool capture, int frame_limit = 10,
- StackTrace::StackTraceOptions options = StackTrace::kOverview);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture, int frame_limit = 10,
+ StackTrace::StackTraceOptions options = StackTrace::kOverview));
/**
* Sets V8 flags from a string.
@@ -5462,9 +5823,9 @@ class V8_EXPORT V8 {
static const char* GetVersion();
/** Callback function for reporting failed access checks.*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void SetFailedAccessCheckCallbackFunction(
- FailedAccessCheckCallback);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback));
/**
* Enables the host application to receive a notification before a
@@ -5476,16 +5837,18 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
- // TODO(dcarney): deprecate this.
- static void AddGCPrologueCallback(
- GCPrologueCallback callback, GCType gc_type_filter = kGCTypeAll);
+ static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void AddGCPrologueCallback(GCPrologueCallback callback,
+ GCType gc_type_filter = kGCTypeAll));
/**
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void RemoveGCPrologueCallback(GCPrologueCallback callback);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void RemoveGCPrologueCallback(GCPrologueCallback callback));
/**
* Enables the host application to receive a notification after a
@@ -5497,32 +5860,35 @@ class V8_EXPORT V8 {
* register the same callback function two times with different
* GCType filters.
*/
- // TODO(dcarney): deprecate this.
- static void AddGCEpilogueCallback(
- GCEpilogueCallback callback, GCType gc_type_filter = kGCTypeAll);
+ static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void AddGCEpilogueCallback(GCEpilogueCallback callback,
+ GCType gc_type_filter = kGCTypeAll));
/**
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void RemoveGCEpilogueCallback(GCEpilogueCallback callback);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void RemoveGCEpilogueCallback(GCEpilogueCallback callback));
/**
* Enables the host application to provide a mechanism to be notified
* and perform custom logging when V8 Allocates Executable Memory.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void AddMemoryAllocationCallback(
- MemoryAllocationCallback callback, ObjectSpace space,
- AllocationAction action);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+ ObjectSpace space,
+ AllocationAction action));
/**
* Removes callback that was installed by AddMemoryAllocationCallback.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void RemoveMemoryAllocationCallback(
- MemoryAllocationCallback callback);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback));
/**
* Initializes V8. This function needs to be called before the first Isolate
@@ -5552,8 +5918,8 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to terminate the current JS execution.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void TerminateExecution(Isolate* isolate);
+ V8_INLINE static V8_DEPRECATE_SOON("Use isolate version",
+ void TerminateExecution(Isolate* isolate));
/**
* Is V8 terminating JavaScript execution.
@@ -5565,8 +5931,9 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to check.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static bool IsExecutionTerminating(Isolate* isolate = NULL);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ bool IsExecutionTerminating(Isolate* isolate = NULL));
/**
* Resume execution capability in the given isolate, whose execution
@@ -5584,8 +5951,8 @@ class V8_EXPORT V8 {
*
* \param isolate The isolate in which to resume execution capability.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void CancelTerminateExecution(Isolate* isolate);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version", void CancelTerminateExecution(Isolate* isolate));
/**
* Releases any resources used by v8 and stops any utility threads
@@ -5603,25 +5970,26 @@ class V8_EXPORT V8 {
* heap. GC is not invoked prior to iterating, therefore there is no
* guarantee that visited objects are still alive.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void VisitExternalResources(
- ExternalResourceVisitor* visitor);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isoalte version",
+ void VisitExternalResources(ExternalResourceVisitor* visitor));
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void VisitHandlesWithClassIds(
- PersistentHandleVisitor* visitor);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor));
/**
* Iterates through all the persistent handles in isolate's heap that have
* class_ids.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void VisitHandlesWithClassIds(
- Isolate* isolate, PersistentHandleVisitor* visitor);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void VisitHandlesWithClassIds(Isolate* isolate,
+ PersistentHandleVisitor* visitor));
/**
* Iterates through all the persistent handles in the current isolate's heap
@@ -5630,9 +5998,10 @@ class V8_EXPORT V8 {
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
- // TODO(dcarney): deprecate this.
- V8_INLINE static void VisitHandlesForPartialDependence(
- Isolate* isolate, PersistentHandleVisitor* visitor);
+ V8_INLINE static V8_DEPRECATE_SOON(
+ "Use isolate version",
+ void VisitHandlesForPartialDependence(Isolate* isolate,
+ PersistentHandleVisitor* visitor));
/**
* Initialize the ICU library bundled with V8. The embedder should only
@@ -5658,8 +6027,6 @@ class V8_EXPORT V8 {
private:
V8();
- enum WeakHandleType { PhantomHandle, NonphantomHandle };
-
static internal::Object** GlobalizeReference(internal::Isolate* isolate,
internal::Object** handle);
static internal::Object** CopyPersistent(internal::Object** handle);
@@ -5667,20 +6034,33 @@ class V8_EXPORT V8 {
typedef WeakCallbackData<Value, void>::Callback WeakCallback;
static void MakeWeak(internal::Object** global_handle, void* data,
WeakCallback weak_callback);
- static void MakePhantom(internal::Object** global_handle, void* data,
- // Must be 0 or kNoInternalFieldIndex.
- int internal_field_index1,
- // Must be 1 or kNoInternalFieldIndex.
- int internal_field_index2,
- PhantomCallbackData<void>::Callback weak_callback);
+ static void MakeWeak(internal::Object** global_handle, void* data,
+ WeakCallbackInfo<void>::Callback weak_callback,
+ WeakCallbackType type);
+ static void MakeWeak(internal::Object** global_handle, void* data,
+ // Must be 0 or -1.
+ int internal_field_index1,
+ // Must be 1 or -1.
+ int internal_field_index2,
+ WeakCallbackInfo<void>::Callback weak_callback);
static void* ClearWeak(internal::Object** global_handle);
static void Eternalize(Isolate* isolate,
Value* handle,
int* index);
static Local<Value> GetEternal(Isolate* isolate, int index);
+ static void CheckIsJust(bool is_just);
+ static void ToLocalEmpty();
+ static void InternalFieldOutOfBounds(int index);
+
template <class T> friend class Handle;
template <class T> friend class Local;
+ template <class T>
+ friend class MaybeLocal;
+ template <class T>
+ friend class Maybe;
+ template <class T>
+ friend class WeakCallbackInfo;
template <class T> friend class Eternal;
template <class T> friend class PersistentBase;
template <class T, class M> friend class Persistent;
@@ -5689,6 +6069,69 @@ class V8_EXPORT V8 {
/**
+ * A simple Maybe type, representing an object which may or may not have a
+ * value; see https://hackage.haskell.org/package/base/docs/Data-Maybe.html.
+ *
+ * If an API method returns a Maybe<>, the API method can potentially fail
+ * either because an exception is thrown, or because an exception is pending,
+ * e.g. because a previous API call threw an exception that hasn't been caught
+ * yet, or because a TerminateExecution exception was thrown. In that case, a
+ * "Nothing" value is returned.
+ */
+template <class T>
+class Maybe {
+ public:
+ V8_INLINE bool IsNothing() const { return !has_value; }
+ V8_INLINE bool IsJust() const { return has_value; }
+
+ // Will crash when checks are enabled if the Maybe<> is nothing.
+ V8_INLINE T FromJust() const {
+#ifdef V8_ENABLE_CHECKS
+ V8::CheckIsJust(IsJust());
+#endif
+ return value;
+ }
+
+ V8_INLINE T FromMaybe(const T& default_value) const {
+ return has_value ? value : default_value;
+ }
+
+ V8_INLINE bool operator==(const Maybe& other) const {
+ return (IsJust() == other.IsJust()) &&
+ (!IsJust() || FromJust() == other.FromJust());
+ }
+
+ V8_INLINE bool operator!=(const Maybe& other) const {
+ return !operator==(other);
+ }
+
+ private:
+ Maybe() : has_value(false) {}
+ explicit Maybe(const T& t) : has_value(true), value(t) {}
+
+ bool has_value;
+ T value;
+
+ template <class U>
+ friend Maybe<U> Nothing();
+ template <class U>
+ friend Maybe<U> Just(const U& u);
+};
+
+
+template <class T>
+inline Maybe<T> Nothing() {
+ return Maybe<T>();
+}
+
+
+template <class T>
+inline Maybe<T> Just(const T& t) {
+ return Maybe<T>(t);
+}
+
+
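A minimal consumption sketch, assuming an entered context and a v8::Local<v8::Message> named message in scope; GetLineNumber(context) is the Maybe-returning overload added to Message later in this patch:

    v8::Maybe<int> maybe_line = message->GetLineNumber(context);
    if (maybe_line.IsJust()) {
      // Safe here; an unguarded FromJust() on Nothing aborts via
      // V8::CheckIsJust under V8_ENABLE_CHECKS.
      int line = maybe_line.FromJust();
    }
    int line_or_zero = maybe_line.FromMaybe(0);  // or fall back to a default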
+/**
* An external exception handler.
*/
class V8_EXPORT TryCatch {
@@ -5698,8 +6141,7 @@ class V8_EXPORT TryCatch {
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
- // TODO(dcarney): deprecate.
- TryCatch();
+ V8_DEPRECATE_SOON("Use isolate version", TryCatch());
/**
* Creates a new try/catch block and registers it with v8. Note that
@@ -5763,7 +6205,8 @@ class V8_EXPORT TryCatch {
* Returns the .stack property of the thrown object. If no .stack
* property is present an empty handle is returned.
*/
- Local<Value> StackTrace() const;
+ V8_DEPRECATE_SOON("Use maybe version.", Local<Value> StackTrace()) const;
+ MaybeLocal<Value> StackTrace(Local<Context> context) const;
/**
* Returns the message associated with this exception. If there is
@@ -5832,10 +6275,7 @@ class V8_EXPORT TryCatch {
v8::TryCatch* next_;
void* exception_;
void* message_obj_;
- void* message_script_;
void* js_stack_comparable_address_;
- int message_start_pos_;
- int message_end_pos_;
bool is_verbose_ : 1;
bool can_continue_ : 1;
bool capture_message_ : 1;
@@ -5947,6 +6387,13 @@ class V8_EXPORT Context {
v8::Isolate* GetIsolate();
/**
+ * The field at kDebugIdIndex is reserved for V8 debugger implementation.
+ * The value is propagated to the scripts compiled in the given Context and
+ * can be used for filtering scripts.
+ */
+ enum EmbedderDataFields { kDebugIdIndex = 0 };
+
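A debugger-side sketch (GetEmbedderData is documented immediately below; context is an entered v8::Local<v8::Context>):

    // Slot 0 is reserved for the debug id; embedder data starts above it.
    v8::Local<v8::Value> debug_id =
        context->GetEmbedderData(v8::Context::kDebugIdIndex);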
+ /**
* Gets the embedder data with the given index, which must have been set by a
* previous call to SetEmbedderData with the same index. Note that index 0
* currently has a special meaning for Chrome's debugger.
@@ -6255,7 +6702,7 @@ class Internals {
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 74;
+ static const int kContextEmbedderDataIndex = 76;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -6273,7 +6720,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 155;
+ static const int kEmptyStringRootIndex = 156;
// The external allocation limit should be below 256 MB on all architectures
// to avoid resource-constrained embedders running low on memory.
@@ -6291,7 +6738,7 @@ class Internals {
static const int kJSObjectType = 0xbd;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
- static const int kForeignType = 0x88;
+ static const int kForeignType = 0x87;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@@ -6457,6 +6904,26 @@ Local<T> Eternal<T>::Get(Isolate* isolate) {
template <class T>
+Local<T> MaybeLocal<T>::ToLocalChecked() {
+#ifdef V8_ENABLE_CHECKS
+ if (val_ == nullptr) V8::ToLocalEmpty();
+#endif
+ return Local<T>(val_);
+}
+
+
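Both unwrap paths, sketched against CompileUnboundScript from later in this patch (isolate and source assumed already set up):

    v8::MaybeLocal<v8::UnboundScript> maybe =
        v8::ScriptCompiler::CompileUnboundScript(
            isolate, &source, v8::ScriptCompiler::kNoCompileOptions);
    v8::Local<v8::UnboundScript> unbound;
    if (!maybe.ToLocal(&unbound)) {
      // Compilation failed; an exception is pending on the isolate.
    }
    // Or, where emptiness would be a programming error:
    unbound = maybe.ToLocalChecked();  // trips "Empty MaybeLocal." when empty

Note that ToLocalChecked() only enforces the contract under V8_ENABLE_CHECKS; without checks an empty MaybeLocal silently yields an empty Local.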
+template <class T>
+void* WeakCallbackInfo<T>::GetInternalField(int index) const {
+#ifdef V8_ENABLE_CHECKS
+ if (index < 0 || index >= kInternalFieldsInWeakCallback) {
+ V8::InternalFieldOutOfBounds(index);
+ }
+#endif
+ return internal_fields_[index];
+}
+
+
+template <class T>
T* PersistentBase<T>::New(Isolate* isolate, T* that) {
if (that == NULL) return NULL;
internal::Object** p = reinterpret_cast<internal::Object**>(that);
@@ -6560,12 +7027,23 @@ void PersistentBase<T>::SetWeak(
template <class T>
template <typename P>
void PersistentBase<T>::SetPhantom(
- P* parameter, typename PhantomCallbackData<P>::Callback callback,
+ P* parameter, typename WeakCallbackInfo<P>::Callback callback,
int internal_field_index1, int internal_field_index2) {
- typedef typename PhantomCallbackData<void>::Callback Callback;
- V8::MakePhantom(reinterpret_cast<internal::Object**>(this->val_), parameter,
- internal_field_index1, internal_field_index2,
- reinterpret_cast<Callback>(callback));
+ typedef typename WeakCallbackInfo<void>::Callback Callback;
+ V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
+ internal_field_index1, internal_field_index2,
+ reinterpret_cast<Callback>(callback));
+}
+
+
+template <class T>
+template <typename P>
+V8_INLINE void PersistentBase<T>::SetWeak(
+ P* parameter, typename WeakCallbackInfo<P>::Callback callback,
+ WeakCallbackType type) {
+ typedef typename WeakCallbackInfo<void>::Callback Callback;
+ V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
+ reinterpret_cast<Callback>(callback), type);
}
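A sketch of the parameter-flavoured weakness added here (assuming, per the templates above, that WeakCallbackInfo<P>::Callback is void(const WeakCallbackInfo<P>&) and that WeakCallbackInfo carries the usual GetParameter() accessor; Wrapper and handle_ are hypothetical embedder names). By the time a phantom callback runs, the JS object is already unreachable, so only the parameter and the snapshotted internal fields are available:

    struct Wrapper { /* native state owned by the JS object */ };

    static void OnWeak(const v8::WeakCallbackInfo<Wrapper>& info) {
      delete info.GetParameter();  // kParameter: the pointer given to SetWeak
      // info.GetInternalField(0..1) is only meaningful under
      // WeakCallbackType::kInternalFields.
    }

    // handle_ is a v8::Persistent<v8::Object> kept alongside the Wrapper.
    handle_.SetWeak(wrapper, OnWeak, v8::WeakCallbackType::kParameter);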
@@ -6823,6 +7301,9 @@ Handle<Integer> ScriptOrigin::ScriptID() const {
}
+Handle<Value> ScriptOrigin::SourceMapUrl() const { return source_map_url_; }
+
+
ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
CachedData* data)
: source_string(string),
@@ -6831,6 +7312,7 @@ ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
resource_column_offset(origin.ResourceColumnOffset()),
resource_is_embedder_debug_script(origin.ResourceIsEmbedderDebugScript()),
resource_is_shared_cross_origin(origin.ResourceIsSharedCrossOrigin()),
+ source_map_url(origin.SourceMapUrl()),
cached_data(data) {}
@@ -7046,6 +7528,14 @@ Local<Uint32> Value::ToUint32() const {
Local<Int32> Value::ToInt32() const { return ToInt32(Isolate::GetCurrent()); }
+Boolean* Boolean::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Boolean*>(value);
+}
+
+
Name* Name::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -7078,6 +7568,22 @@ Integer* Integer::Cast(v8::Value* value) {
}
+Int32* Int32::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int32*>(value);
+}
+
+
+Uint32* Uint32::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint32*>(value);
+}
+
+
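These follow the header's established checked-cast pattern; a typical guarded use (value is an in-scope v8::Local<v8::Value>):

    if (value->IsInt32()) {
      // CheckCast is only compiled in under V8_ENABLE_CHECKS.
      int32_t n = v8::Int32::Cast(*value)->Value();
    }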
Date* Date::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index ca806cbd24..a0d9b5c967 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -42,8 +42,8 @@
((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= \
((major) * 10000 + (minor) * 100 + (patchlevel)))
#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
-# define V8_GNUC_PREREQ(major, minor, patchlevel) \
- ((__GNUC__ * 10000 + __GNUC_MINOR__) >= \
+# define V8_GNUC_PREREQ(major, minor, patchlevel) \
+ ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= \
((major) * 10000 + (minor) * 100 + (patchlevel)))
#else
# define V8_GNUC_PREREQ(major, minor, patchlevel) 0
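Worked example of the scaling fix: on GCC 4.8 without __GNUC_PATCHLEVEL__, the old form computed 4 * 10000 + 8 = 40008 on the left, so even V8_GNUC_PREREQ(4, 1, 0) (right side 40100) wrongly reported failure. With the minor version scaled by 100 the left side becomes 40800, and 40800 >= 40100 holds as intended; the patchlevel is simply unavailable on this branch, so the comparison stays conservative.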
@@ -343,6 +343,10 @@ declarator __attribute__((deprecated))
#endif
+// A macro to make upcoming deprecations easy to spot; for now it expands to
+// the bare declaration and the message is unused.
+#define V8_DEPRECATE_SOON(message, declarator) declarator
+
+
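Usage mirrors the V8_DEPRECATED pattern above; since the macro currently expands to the bare declarator, the message is informational only and marks the call sites a future -Wdeprecated build will flag. From v8.h in this same patch:

    V8_INLINE static V8_DEPRECATE_SOON("Use isolate version",
                                       void TerminateExecution(Isolate* isolate));
    // ...which, for now, compiles as:
    V8_INLINE static void TerminateExecution(Isolate* isolate);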
// A macro to provide the compiler with branch prediction information.
#if V8_HAS_BUILTIN_EXPECT
# define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
@@ -425,4 +429,13 @@ namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
# define V8_ALIGNOF(type) (sizeof(::v8::AlignOfHelper<type>) - sizeof(type))
#endif
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+// int foo() V8_WARN_UNUSED_RESULT;
+#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
+#define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
+#endif
+
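Where the attribute is available (GCC/Clang set V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT), discarding the annotated result draws a -Wunused-result warning. A sketch with a hypothetical free function:

    int Reserve() V8_WARN_UNUSED_RESULT;  // hypothetical declaration

    void Caller() {
      Reserve();              // warns: ignoring return value
      int slots = Reserve();  // fine: the result is examined
      (void)slots;
    }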
#endif // V8CONFIG_H_
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index 4dbb3c756a..4d320535f1 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -7,7 +7,7 @@ include_rules = [
]
specific_include_rules = {
- "(mksnapshot|d8)\.cc": [
+ "d8\.cc": [
"+include/libplatform/libplatform.h",
],
}
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index e08f86fc22..f774c6db46 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -242,14 +242,8 @@ void Accessors::ArrayLengthSetter(
return;
}
- Handle<Object> exception;
- maybe = isolate->factory()->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0));
- if (!maybe.ToHandle(&exception)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
-
+ Handle<Object> exception = isolate->factory()->NewRangeError(
+ "invalid_array_length", HandleVector<Object>(NULL, 0));
isolate->ScheduleThrow(*exception);
}
@@ -1101,12 +1095,52 @@ void Accessors::FunctionLengthGetter(
}
+MUST_USE_RESULT static MaybeHandle<Object> ReplaceAccessorWithDataProperty(
+ Isolate* isolate, Handle<JSObject> object, Handle<Name> name,
+ Handle<Object> value, bool is_observed, Handle<Object> old_value) {
+ LookupIterator it(object, name);
+ CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+ DCHECK(it.HolderIsReceiverOrHiddenPrototype());
+ it.ReconfigureDataProperty(value, it.property_details().attributes());
+ value = it.WriteDataValue(value);
+
+ if (is_observed && !old_value->SameValue(*value)) {
+ return JSObject::EnqueueChangeRecord(object, "update", name, old_value);
+ }
+
+ return value;
+}
+
+
+MUST_USE_RESULT static MaybeHandle<Object> SetFunctionLength(
+ Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
+ Handle<Object> old_value;
+ bool is_observed = function->map()->is_observed();
+ if (is_observed) {
+ old_value = handle(Smi::FromInt(function->shared()->length()), isolate);
+ }
+
+ return ReplaceAccessorWithDataProperty(isolate, function,
+ isolate->factory()->length_string(),
+ value, is_observed, old_value);
+}
+
+
void Accessors::FunctionLengthSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
- // Function length is non writable, non configurable.
- UNREACHABLE();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<Object> value = Utils::OpenHandle(*val);
+
+ if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
+
+ Handle<JSFunction> object =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
+ if (SetFunctionLength(isolate, object, value).is_null()) {
+ isolate->OptionalRescheduleException(false);
+ }
}
@@ -1137,12 +1171,35 @@ void Accessors::FunctionNameGetter(
}
+MUST_USE_RESULT static MaybeHandle<Object> SetFunctionName(
+ Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
+ Handle<Object> old_value;
+ bool is_observed = function->map()->is_observed();
+ if (is_observed) {
+ old_value = handle(function->shared()->name(), isolate);
+ }
+
+ return ReplaceAccessorWithDataProperty(isolate, function,
+ isolate->factory()->name_string(),
+ value, is_observed, old_value);
+}
+
+
void Accessors::FunctionNameSetter(
v8::Local<v8::Name> name,
v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
- // Function name is non writable, non configurable.
- UNREACHABLE();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+ HandleScope scope(isolate);
+ Handle<Object> value = Utils::OpenHandle(*val);
+
+ if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
+
+ Handle<JSFunction> object =
+ Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
+ if (SetFunctionName(isolate, object, value).is_null()) {
+ isolate->OptionalRescheduleException(false);
+ }
}
@@ -1459,14 +1516,8 @@ static void ModuleGetExport(
if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
- Handle<Object> exception;
- MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
+ Handle<Object> exception = isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
- if (!maybe.ToHandle(&exception)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
-
isolate->ScheduleThrow(*exception);
return;
}
@@ -1486,14 +1537,8 @@ static void ModuleSetExport(
Isolate* isolate = context->GetIsolate();
if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property);
- Handle<Object> exception;
- MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
+ Handle<Object> exception = isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
- if (!maybe.ToHandle(&exception)) {
- isolate->OptionalRescheduleException(false);
- return;
- }
-
isolate->ScheduleThrow(*exception);
return;
}
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 11f20efe86..e7924533ac 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -81,14 +81,14 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
LookupIterator it(object, Handle<Name>::cast(key),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- DCHECK(maybe.has_value);
+ DCHECK(maybe.IsJust());
duplicate = it.IsFound();
} else {
uint32_t index = 0;
key->ToArrayIndex(&index);
Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
- if (!maybe.has_value) return MaybeHandle<Object>();
- duplicate = maybe.value;
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ duplicate = maybe.FromJust();
}
if (duplicate) {
Handle<Object> args[1] = {key};
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 4d07f8216e..049c35a903 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -22,6 +22,7 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/cpu-profiler.h"
@@ -34,8 +35,8 @@
#include "src/icu_util.h"
#include "src/json-parser.h"
#include "src/messages.h"
-#include "src/natives.h"
#include "src/parser.h"
+#include "src/pending-compilation-error-handler.h"
#include "src/profile-generator-inl.h"
#include "src/property.h"
#include "src/property-details.h"
@@ -45,55 +46,157 @@
#include "src/sampler.h"
#include "src/scanner-character-streams.h"
#include "src/simulator.h"
-#include "src/snapshot.h"
+#include "src/snapshot/natives.h"
+#include "src/snapshot/snapshot.h"
#include "src/unicode-inl.h"
#include "src/v8threads.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
-#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
+namespace v8 {
-#define ENTER_V8(isolate) \
- i::VMState<v8::OTHER> __state__((isolate))
+#define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
-namespace v8 {
-#define ON_BAILOUT(isolate, location, code) \
- if (IsExecutionTerminatingCheck(isolate)) { \
- code; \
- UNREACHABLE(); \
- }
+#define ENTER_V8(isolate) i::VMState<v8::OTHER> __state__((isolate))
-#define EXCEPTION_PREAMBLE(isolate) \
- (isolate)->handle_scope_implementer()->IncrementCallDepth(); \
- DCHECK(!(isolate)->external_caught_exception()); \
+#define PREPARE_FOR_EXECUTION_GENERIC(isolate, context, function_name, \
+ bailout_value, HandleScopeClass, \
+ do_callback) \
+ if (IsExecutionTerminatingCheck(isolate)) { \
+ return bailout_value; \
+ } \
+ HandleScopeClass handle_scope(isolate); \
+ CallDepthScope call_depth_scope(isolate, context, do_callback); \
+ LOG_API(isolate, function_name); \
+ ENTER_V8(isolate); \
bool has_pending_exception = false
-#define EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, do_callback) \
- do { \
- i::HandleScopeImplementer* handle_scope_implementer = \
- (isolate)->handle_scope_implementer(); \
- handle_scope_implementer->DecrementCallDepth(); \
- if (has_pending_exception) { \
- bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero(); \
- (isolate)->OptionalRescheduleException(call_depth_is_zero); \
- do_callback \
- return value; \
- } \
- do_callback \
+#define PREPARE_FOR_EXECUTION_WITH_CONTEXT( \
+ context, function_name, bailout_value, HandleScopeClass, do_callback) \
+ auto isolate = context.IsEmpty() \
+ ? i::Isolate::Current() \
+ : reinterpret_cast<i::Isolate*>(context->GetIsolate()); \
+ PREPARE_FOR_EXECUTION_GENERIC(isolate, context, function_name, \
+ bailout_value, HandleScopeClass, do_callback);
+
+
+#define PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, function_name, T) \
+ PREPARE_FOR_EXECUTION_GENERIC(isolate, Local<Context>(), function_name, \
+ MaybeLocal<T>(), InternalEscapableScope, \
+ false);
+
+
+#define PREPARE_FOR_EXECUTION(context, function_name, T) \
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, MaybeLocal<T>(), \
+ InternalEscapableScope, false)
+
+
+#define PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, function_name, T) \
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, MaybeLocal<T>(), \
+ InternalEscapableScope, true)
+
+
+#define PREPARE_FOR_EXECUTION_PRIMITIVE(context, function_name, T) \
+ PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, function_name, Nothing<T>(), \
+ i::HandleScope, false)
+
+
+#define EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, value) \
+ do { \
+ if (has_pending_exception) { \
+ call_depth_scope.Escape(); \
+ return value; \
+ } \
} while (false)
-#define EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, value) \
- EXCEPTION_BAILOUT_CHECK_GENERIC( \
- isolate, value, isolate->FireCallCompletedCallback();)
+#define RETURN_ON_FAILED_EXECUTION(T) \
+ EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, MaybeLocal<T>())
+
+
+#define RETURN_ON_FAILED_EXECUTION_PRIMITIVE(T) \
+ EXCEPTION_BAILOUT_CHECK_SCOPED(isolate, Nothing<T>())
+
+
+#define RETURN_TO_LOCAL_UNCHECKED(maybe_local, T) \
+ return maybe_local.FromMaybe(Local<T>());
+
+
+#define RETURN_ESCAPED(value) return handle_scope.Escape(value);
+
+
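Read together, these macros collapse the old ON_BAILOUT / EXCEPTION_PREAMBLE / EXCEPTION_BAILOUT_CHECK choreography into one shape per API entry point. Illustrative skeleton only (SomeApi is not a real function; compare the rewritten Script::Run below for the pattern in earnest):

    MaybeLocal<Value> SomeApi(Local<Context> context) {
      // Bails out if terminating, opens the escapable handle scope and the
      // CallDepthScope, logs, enters V8, declares has_pending_exception.
      PREPARE_FOR_EXECUTION(context, "v8::SomeApi()", Value);
      Local<Value> result;
      has_pending_exception = false /* ...real work sets this... */;
      RETURN_ON_FAILED_EXECUTION(Value);  // escapes call depth, returns MaybeLocal<Value>()
      RETURN_ESCAPED(result);             // returns handle_scope.Escape(result)
    }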
+namespace {
+
+Local<Context> ContextFromHeapObject(i::Handle<i::Object> obj) {
+ return reinterpret_cast<v8::Isolate*>(i::HeapObject::cast(*obj)->GetIsolate())
+ ->GetCurrentContext();
+}
+
+class InternalEscapableScope : public v8::EscapableHandleScope {
+ public:
+ explicit inline InternalEscapableScope(i::Isolate* isolate)
+ : v8::EscapableHandleScope(reinterpret_cast<v8::Isolate*>(isolate)) {}
+};
+
+
+class CallDepthScope {
+ public:
+ explicit CallDepthScope(i::Isolate* isolate, Local<Context> context,
+ bool do_callback)
+ : isolate_(isolate),
+ context_(context),
+ escaped_(false),
+ do_callback_(do_callback) {
+ // TODO(dcarney): remove this when blink stops crashing.
+ DCHECK(!isolate_->external_caught_exception());
+ isolate_->handle_scope_implementer()->IncrementCallDepth();
+ if (!context_.IsEmpty()) context_->Enter();
+ }
+ ~CallDepthScope() {
+ if (!context_.IsEmpty()) context_->Exit();
+ if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
+ if (do_callback_) isolate_->FireCallCompletedCallback();
+ }
+
+ void Escape() {
+ DCHECK(!escaped_);
+ escaped_ = true;
+ auto handle_scope_implementer = isolate_->handle_scope_implementer();
+ handle_scope_implementer->DecrementCallDepth();
+ bool call_depth_is_zero = handle_scope_implementer->CallDepthIsZero();
+ isolate_->OptionalRescheduleException(call_depth_is_zero);
+ }
+
+ private:
+ i::Isolate* const isolate_;
+ Local<Context> context_;
+ bool escaped_;
+ bool do_callback_;
+};
+
+} // namespace
-#define EXCEPTION_BAILOUT_CHECK(isolate, value) \
- EXCEPTION_BAILOUT_CHECK_GENERIC(isolate, value, ;)
+static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
+ i::Handle<i::Script> script) {
+ i::Handle<i::Object> scriptName(i::Script::GetNameOrSourceURL(script));
+ i::Handle<i::Object> source_map_url(script->source_mapping_url(), isolate);
+ v8::Isolate* v8_isolate =
+ reinterpret_cast<v8::Isolate*>(script->GetIsolate());
+ v8::ScriptOrigin origin(
+ Utils::ToLocal(scriptName),
+ v8::Integer::New(v8_isolate, script->line_offset()->value()),
+ v8::Integer::New(v8_isolate, script->column_offset()->value()),
+ v8::Boolean::New(v8_isolate, script->is_shared_cross_origin()),
+ v8::Integer::New(v8_isolate, script->id()->value()),
+ v8::Boolean::New(v8_isolate, script->is_embedder_debug_script()),
+ Utils::ToLocal(source_map_url));
+ return origin;
+}
// --- E x c e p t i o n B e h a v i o r ---
@@ -134,10 +237,6 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
heap_stats.cell_space_size = &cell_space_size;
intptr_t cell_space_capacity;
heap_stats.cell_space_capacity = &cell_space_capacity;
- intptr_t property_cell_space_size;
- heap_stats.property_cell_space_size = &property_cell_space_size;
- intptr_t property_cell_space_capacity;
- heap_stats.property_cell_space_capacity = &property_cell_space_capacity;
intptr_t lo_space_size;
heap_stats.lo_space_size = &lo_space_size;
int global_handle_count;
@@ -207,8 +306,10 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
}
-bool RunExtraCode(Isolate* isolate, char* utf8_source) {
+bool RunExtraCode(Isolate* isolate, const char* utf8_source) {
// Run custom script if provided.
+ base::ElapsedTimer timer;
+ timer.Start();
TryCatch try_catch;
Local<String> source_string = String::NewFromUtf8(isolate, utf8_source);
if (try_catch.HasCaught()) return false;
@@ -217,18 +318,24 @@ bool RunExtraCode(Isolate* isolate, char* utf8_source) {
Local<Script> script = ScriptCompiler::Compile(isolate, &source);
if (try_catch.HasCaught()) return false;
script->Run();
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("Executing custom snapshot script took %0.3f ms\n",
+ timer.Elapsed().InMillisecondsF());
+ }
+ timer.Stop();
return !try_catch.HasCaught();
}
-StartupData V8::CreateSnapshotDataBlob(char* custom_source) {
- Isolate::CreateParams params;
- params.enable_serializer = true;
- Isolate* isolate = v8::Isolate::New(params);
+StartupData V8::CreateSnapshotDataBlob(const char* custom_source) {
+ i::Isolate* internal_isolate = new i::Isolate(true);
+ Isolate* isolate = reinterpret_cast<Isolate*>(internal_isolate);
StartupData result = {NULL, 0};
{
+ base::ElapsedTimer timer;
+ timer.Start();
Isolate::Scope isolate_scope(isolate);
- i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ internal_isolate->Init(NULL);
Persistent<Context> context;
i::Snapshot::Metadata metadata;
{
@@ -266,6 +373,11 @@ StartupData V8::CreateSnapshotDataBlob(char* custom_source) {
result = i::Snapshot::CreateSnapshotBlob(ser, context_ser, metadata);
}
+ if (i::FLAG_profile_deserialization) {
+ i::PrintF("Creating snapshot took %0.3f ms\n",
+ timer.Elapsed().InMillisecondsF());
+ }
+ timer.Stop();
}
isolate->Dispose();
return result;
@@ -431,21 +543,29 @@ void V8::MakeWeak(i::Object** object, void* parameter,
}
-void V8::MakePhantom(i::Object** object, void* parameter,
- int internal_field_index1, int internal_field_index2,
- PhantomCallbackData<void>::Callback weak_callback) {
+void V8::MakeWeak(i::Object** object, void* parameter,
+ int internal_field_index1, int internal_field_index2,
+ WeakCallbackInfo<void>::Callback weak_callback) {
+ WeakCallbackType type = WeakCallbackType::kParameter;
if (internal_field_index1 == 0) {
if (internal_field_index2 == 1) {
- i::GlobalHandles::MakePhantom(object, parameter, 2, weak_callback);
+ type = WeakCallbackType::kInternalFields;
} else {
- DCHECK_EQ(internal_field_index2, kNoInternalFieldIndex);
- i::GlobalHandles::MakePhantom(object, parameter, 1, weak_callback);
+ DCHECK_EQ(internal_field_index2, -1);
+ type = WeakCallbackType::kInternalFields;
}
} else {
- DCHECK_EQ(internal_field_index1, kNoInternalFieldIndex);
- DCHECK_EQ(internal_field_index2, kNoInternalFieldIndex);
- i::GlobalHandles::MakePhantom(object, parameter, 0, weak_callback);
+ DCHECK_EQ(internal_field_index1, -1);
+ DCHECK_EQ(internal_field_index2, -1);
}
+ i::GlobalHandles::MakeWeak(object, parameter, weak_callback, type);
+}
+
+
+void V8::MakeWeak(i::Object** object, void* parameter,
+ WeakCallbackInfo<void>::Callback weak_callback,
+ WeakCallbackType type) {
+ i::GlobalHandles::MakeWeak(object, parameter, weak_callback, type);
}
@@ -472,6 +592,23 @@ Local<Value> V8::GetEternal(Isolate* v8_isolate, int index) {
}
+void V8::CheckIsJust(bool is_just) {
+ Utils::ApiCheck(is_just, "v8::FromJust", "Maybe value is Nothing.");
+}
+
+
+void V8::ToLocalEmpty() {
+ Utils::ApiCheck(false, "v8::ToLocalChecked", "Empty MaybeLocal.");
+}
+
+
+void V8::InternalFieldOutOfBounds(int index) {
+ Utils::ApiCheck(0 <= index && index < kInternalFieldsInWeakCallback,
+ "WeakCallbackInfo::GetInternalField",
+ "Internal field out of bounds.");
+}
+
+
// --- H a n d l e s ---
@@ -485,10 +622,14 @@ void HandleScope::Initialize(Isolate* isolate) {
// We do not want to check the correct usage of the Locker class all over the
// place, so we do it only here: Without a HandleScope, an embedder can do
// almost nothing, so it is enough to check in this central place.
- Utils::ApiCheck(!v8::Locker::IsActive() ||
- internal_isolate->thread_manager()->IsLockedByCurrentThread(),
- "HandleScope::HandleScope",
- "Entering the V8 API without proper locking in place");
+ // We make an exception if the serializer is enabled, which means that the
+ // Isolate is exclusively used to create a snapshot.
+ Utils::ApiCheck(
+ !v8::Locker::IsActive() ||
+ internal_isolate->thread_manager()->IsLockedByCurrentThread() ||
+ internal_isolate->serializer_enabled(),
+ "HandleScope::HandleScope",
+ "Entering the V8 API without proper locking in place");
i::HandleScopeData* current = internal_isolate->handle_scope_data();
isolate_ = internal_isolate;
prev_next_ = current->next;
@@ -833,6 +974,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
obj->set_length(length);
obj->set_undetectable(false);
obj->set_needs_access_check(false);
+ obj->set_accept_any_receiver(true);
if (!signature.IsEmpty())
obj->set_signature(*Utils::OpenHandle(*signature));
return Utils::ToLocal(obj);
@@ -1006,6 +1148,15 @@ void FunctionTemplate::SetClassName(Handle<String> name) {
}
+void FunctionTemplate::SetAcceptAnyReceiver(bool value) {
+ auto info = Utils::OpenHandle(this);
+ EnsureNotInstantiated(info, "v8::FunctionTemplate::SetAcceptAnyReceiver");
+ auto isolate = info->GetIsolate();
+ ENTER_V8(isolate);
+ info->set_accept_any_receiver(value);
+}
+
+
void FunctionTemplate::SetHiddenPrototype(bool value) {
auto info = Utils::OpenHandle(this);
EnsureNotInstantiated(info, "v8::FunctionTemplate::SetHiddenPrototype");
@@ -1173,10 +1324,12 @@ void ObjectTemplate::SetAccessor(v8::Handle<Name> name,
template <typename Getter, typename Setter, typename Query, typename Deleter,
typename Enumerator>
-static void ObjectTemplateSetNamedPropertyHandler(
- ObjectTemplate* templ, Getter getter, Setter setter, Query query,
- Deleter remover, Enumerator enumerator, Handle<Value> data,
- bool can_intercept_symbols, PropertyHandlerFlags flags) {
+static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
+ Getter getter, Setter setter,
+ Query query, Deleter remover,
+ Enumerator enumerator,
+ Handle<Value> data,
+ PropertyHandlerFlags flags) {
i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -1191,9 +1344,13 @@ static void ObjectTemplateSetNamedPropertyHandler(
if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
obj->set_flags(0);
- obj->set_can_intercept_symbols(can_intercept_symbols);
+ obj->set_can_intercept_symbols(
+ !(static_cast<int>(flags) &
+ static_cast<int>(PropertyHandlerFlags::kOnlyInterceptStrings)));
obj->set_all_can_read(static_cast<int>(flags) &
static_cast<int>(PropertyHandlerFlags::kAllCanRead));
+ obj->set_non_masking(static_cast<int>(flags) &
+ static_cast<int>(PropertyHandlerFlags::kNonMasking));
if (data.IsEmpty()) {
data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1207,9 +1364,9 @@ void ObjectTemplate::SetNamedPropertyHandler(
NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
NamedPropertyEnumeratorCallback enumerator, Handle<Value> data) {
- ObjectTemplateSetNamedPropertyHandler(this, getter, setter, query, remover,
- enumerator, data, false,
- PropertyHandlerFlags::kNone);
+ ObjectTemplateSetNamedPropertyHandler(
+ this, getter, setter, query, remover, enumerator, data,
+ PropertyHandlerFlags::kOnlyInterceptStrings);
}
@@ -1217,7 +1374,7 @@ void ObjectTemplate::SetHandler(
const NamedPropertyHandlerConfiguration& config) {
ObjectTemplateSetNamedPropertyHandler(
this, config.getter, config.setter, config.query, config.deleter,
- config.enumerator, config.data, true, config.flags);
+ config.enumerator, config.data, config.flags);
}
@@ -1373,9 +1530,36 @@ Local<Script> UnboundScript::BindToCurrentContext() {
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
i::Handle<i::SharedFunctionInfo>
function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
+ i::Isolate* isolate = obj->GetIsolate();
+
+ i::ScopeInfo* scope_info = function_info->scope_info();
+ i::Handle<i::JSReceiver> global(isolate->native_context()->global_object());
+ for (int i = 0; i < scope_info->StrongModeFreeVariableCount(); ++i) {
+ i::Handle<i::String> name_string(scope_info->StrongModeFreeVariableName(i));
+ i::ScriptContextTable::LookupResult result;
+ i::Handle<i::ScriptContextTable> script_context_table(
+ isolate->native_context()->script_context_table());
+ if (!i::ScriptContextTable::Lookup(script_context_table, name_string,
+ &result)) {
+ i::Handle<i::Name> name(scope_info->StrongModeFreeVariableName(i));
+ Maybe<bool> has = i::JSReceiver::HasProperty(global, name);
+ if (has.IsJust() && !has.FromJust()) {
+ i::PendingCompilationErrorHandler pending_error_handler_;
+ pending_error_handler_.ReportMessageAt(
+ scope_info->StrongModeFreeVariableStartPosition(i),
+ scope_info->StrongModeFreeVariableEndPosition(i),
+ "strong_unbound_global", name_string, i::kReferenceError);
+ i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+ pending_error_handler_.ThrowPendingError(isolate, script);
+ isolate->ReportPendingMessages();
+ isolate->OptionalRescheduleException(true);
+ return Local<Script>();
+ }
+ }
+ }
i::Handle<i::JSFunction> function =
obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
- function_info, obj->GetIsolate()->native_context());
+ function_info, isolate->native_context());
return ToApiHandle<Script>(function);
}
@@ -1384,15 +1568,12 @@ int UnboundScript::GetId() {
i::Handle<i::HeapObject> obj =
i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::UnboundScript::GetId()", return -1);
LOG_API(isolate, "v8::UnboundScript::GetId");
- {
- i::HandleScope scope(isolate);
- i::Handle<i::SharedFunctionInfo> function_info(
- i::SharedFunctionInfo::cast(*obj));
- i::Handle<i::Script> script(i::Script::cast(function_info->script()));
- return script->id()->value();
- }
+ i::HandleScope scope(isolate);
+ i::Handle<i::SharedFunctionInfo> function_info(
+ i::SharedFunctionInfo::cast(*obj));
+ i::Handle<i::Script> script(i::Script::cast(function_info->script()));
+ return script->id()->value();
}
@@ -1400,7 +1581,6 @@ int UnboundScript::GetLineNumber(int code_pos) {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::UnboundScript::GetLineNumber()", return -1);
LOG_API(isolate, "UnboundScript::GetLineNumber");
if (obj->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(obj->script()));
@@ -1415,8 +1595,6 @@ Handle<Value> UnboundScript::GetScriptName() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::UnboundScript::GetName()",
- return Handle<String>());
LOG_API(isolate, "UnboundScript::GetName");
if (obj->script()->IsScript()) {
i::Object* name = i::Script::cast(obj->script())->name();
@@ -1431,8 +1609,6 @@ Handle<Value> UnboundScript::GetSourceURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::UnboundScript::GetSourceURL()",
- return Handle<String>());
LOG_API(isolate, "UnboundScript::GetSourceURL");
if (obj->script()->IsScript()) {
i::Object* url = i::Script::cast(obj->script())->source_url();
@@ -1447,8 +1623,6 @@ Handle<Value> UnboundScript::GetSourceMappingURL() {
i::Handle<i::SharedFunctionInfo> obj =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
i::Isolate* isolate = obj->GetIsolate();
- ON_BAILOUT(isolate, "v8::UnboundScript::GetSourceMappingURL()",
- return Handle<String>());
LOG_API(isolate, "UnboundScript::GetSourceMappingURL");
if (obj->script()->IsScript()) {
i::Object* url = i::Script::cast(obj->script())->source_mapping_url();
@@ -1459,26 +1633,28 @@ Handle<Value> UnboundScript::GetSourceMappingURL() {
}
-Local<Value> Script::Run() {
- i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
- // If execution is terminating, Compile(..)->Run() requires this
- // check.
- if (obj.is_null()) return Local<Value>();
- i::Isolate* isolate = i::Handle<i::HeapObject>::cast(obj)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
- LOG_API(isolate, "Script::Run");
- ENTER_V8(isolate);
+MaybeLocal<Value> Script::Run(Local<Context> context) {
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Script::Run()", Value)
i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy());
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
- EXCEPTION_PREAMBLE(isolate);
+ auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> receiver(isolate->global_proxy(), isolate);
- i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(
- isolate, fun, receiver, 0, NULL).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
- return Utils::ToLocal(scope.CloseAndEscape(result));
+ Local<Value> result;
+ has_pending_exception =
+ !ToLocal<Value>(i::Execution::Call(isolate, fun, receiver, 0, NULL),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<Value> Script::Run() {
+ auto self = Utils::OpenHandle(this, true);
+ // If execution is terminating, Compile(..)->Run() requires this
+ // check.
+ if (self.is_null()) return Local<Value>();
+ auto context = ContextFromHeapObject(self);
+ RETURN_TO_LOCAL_UNCHECKED(Run(context), Value);
}
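Seen from an embedder, the two entry points line up like this (sketch; script compiled and context entered):

    // Old style: failure is an empty Local, the context is implicit.
    v8::Local<v8::Value> r = script->Run();

    // New style: failure is explicit in the type, the context in the call.
    v8::Local<v8::Value> result;
    if (!script->Run(context).ToLocal(&result)) {
      // An exception is pending (or execution is terminating); nothing to unwrap.
    }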
@@ -1489,12 +1665,12 @@ Local<UnboundScript> Script::GetUnboundScript() {
}
-Local<UnboundScript> ScriptCompiler::CompileUnboundInternal(
+MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
Isolate* v8_isolate, Source* source, CompileOptions options,
bool is_module) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileUnbound()",
- return Local<UnboundScript>());
+ PREPARE_FOR_EXECUTION_WITH_ISOLATE(
+ isolate, "v8::ScriptCompiler::CompileUnbound()", UnboundScript);
// Support the old API for a transition period:
// - kProduceToCache -> kProduceParserCache
@@ -1520,12 +1696,11 @@ Local<UnboundScript> ScriptCompiler::CompileUnboundInternal(
}
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
- LOG_API(isolate, "ScriptCompiler::CompileUnbound");
- ENTER_V8(isolate);
i::SharedFunctionInfo* raw_result = NULL;
{ i::HandleScope scope(isolate);
i::HistogramTimerScope total(isolate->counters()->compile_script(), true);
i::Handle<i::Object> name_obj;
+ i::Handle<i::Object> source_map_url;
int line_offset = 0;
int column_offset = 0;
bool is_embedder_debug_script = false;
@@ -1548,11 +1723,13 @@ Local<UnboundScript> ScriptCompiler::CompileUnboundInternal(
is_embedder_debug_script =
source->resource_is_embedder_debug_script->IsTrue();
}
- EXCEPTION_PREAMBLE(isolate);
+ if (!source->source_map_url.IsEmpty()) {
+ source_map_url = Utils::OpenHandle(*(source->source_map_url));
+ }
i::Handle<i::SharedFunctionInfo> result = i::Compiler::CompileScript(
str, name_obj, line_offset, column_offset, is_embedder_debug_script,
- is_shared_cross_origin, isolate->native_context(), NULL, &script_data,
- options, i::NOT_NATIVES_CODE, is_module);
+ is_shared_cross_origin, source_map_url, isolate->native_context(), NULL,
+ &script_data, options, i::NOT_NATIVES_CODE, is_module);
has_pending_exception = result.is_null();
if (has_pending_exception && script_data != NULL) {
// This case won't happen during normal operation; we have compiled
@@ -1561,7 +1738,7 @@ Local<UnboundScript> ScriptCompiler::CompileUnboundInternal(
delete script_data;
script_data = NULL;
}
- EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
+ RETURN_ON_FAILED_EXECUTION(UnboundScript);
raw_result = *result;
if ((options == kProduceParserCache || options == kProduceCodeCache) &&
@@ -1577,14 +1754,34 @@ Local<UnboundScript> ScriptCompiler::CompileUnboundInternal(
delete script_data;
}
i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
- return ToApiHandle<UnboundScript>(result);
+ RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
+}
+
+
+MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundScript(
+ Isolate* v8_isolate, Source* source, CompileOptions options) {
+ return CompileUnboundInternal(v8_isolate, source, options, false);
}
Local<UnboundScript> ScriptCompiler::CompileUnbound(Isolate* v8_isolate,
Source* source,
CompileOptions options) {
- return CompileUnboundInternal(v8_isolate, source, options, false);
+ RETURN_TO_LOCAL_UNCHECKED(
+ CompileUnboundInternal(v8_isolate, source, options, false),
+ UnboundScript);
+}
+
+
+MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
+ Source* source,
+ CompileOptions options) {
+ auto isolate = context->GetIsolate();
+ auto maybe = CompileUnboundInternal(isolate, source, options, false);
+ Local<UnboundScript> result;
+ if (!maybe.ToLocal(&result)) return MaybeLocal<Script>();
+ v8::Context::Scope scope(context);
+ return result->BindToCurrentContext();
}
@@ -1592,28 +1789,28 @@ Local<Script> ScriptCompiler::Compile(
Isolate* v8_isolate,
Source* source,
CompileOptions options) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()", return Local<Script>());
- LOG_API(isolate, "ScriptCompiler::CompileBound()");
- ENTER_V8(isolate);
- Local<UnboundScript> generic = CompileUnbound(v8_isolate, source, options);
- if (generic.IsEmpty()) return Local<Script>();
+ auto context = v8_isolate->GetCurrentContext();
+ RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, options), Script);
+}
+
+
+MaybeLocal<Script> ScriptCompiler::CompileModule(Local<Context> context,
+ Source* source,
+ CompileOptions options) {
+ CHECK(i::FLAG_harmony_modules);
+ auto isolate = context->GetIsolate();
+ auto maybe = CompileUnboundInternal(isolate, source, options, true);
+ Local<UnboundScript> generic;
+ if (!maybe.ToLocal(&generic)) return MaybeLocal<Script>();
+ v8::Context::Scope scope(context);
return generic->BindToCurrentContext();
}
Local<Script> ScriptCompiler::CompileModule(Isolate* v8_isolate, Source* source,
CompileOptions options) {
- CHECK(i::FLAG_harmony_modules);
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileModule()",
- return Local<Script>());
- LOG_API(isolate, "ScriptCompiler::CompileModule()");
- ENTER_V8(isolate);
- Local<UnboundScript> generic =
- CompileUnboundInternal(v8_isolate, source, options, true);
- if (generic.IsEmpty()) return Local<Script>();
- return generic->BindToCurrentContext();
+ auto context = v8_isolate->GetCurrentContext();
+ RETURN_TO_LOCAL_UNCHECKED(CompileModule(context, source, options), Script);
}
@@ -1656,64 +1853,52 @@ class IsIdentifierHelper {
};
-Local<Function> ScriptCompiler::CompileFunctionInContext(
- Isolate* v8_isolate, Source* source, Local<Context> v8_context,
- size_t arguments_count, Local<String> arguments[],
- size_t context_extension_count, Local<Object> context_extensions[]) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileFunctionInContext()",
- return Local<Function>());
- LOG_API(isolate, "ScriptCompiler::CompileFunctionInContext()");
- ENTER_V8(isolate);
-
+MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
+ Local<Context> v8_context, Source* source, size_t arguments_count,
+ Local<String> arguments[], size_t context_extension_count,
+ Local<Object> context_extensions[]) {
+ PREPARE_FOR_EXECUTION(
+ v8_context, "v8::ScriptCompiler::CompileFunctionInContext()", Function);
i::Handle<i::String> source_string;
+ auto factory = isolate->factory();
if (arguments_count) {
- source_string =
- Utils::OpenHandle(*v8::String::NewFromUtf8(v8_isolate, "(function("));
+ source_string = factory->NewStringFromStaticChars("(function(");
for (size_t i = 0; i < arguments_count; ++i) {
IsIdentifierHelper helper;
if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) {
return Local<Function>();
}
- i::MaybeHandle<i::String> maybe_source =
- isolate->factory()->NewConsString(source_string,
- Utils::OpenHandle(*arguments[i]));
- if (!maybe_source.ToHandle(&source_string)) {
- return Local<Function>();
- }
+ has_pending_exception =
+ !factory->NewConsString(source_string,
+ Utils::OpenHandle(*arguments[i]))
+ .ToHandle(&source_string);
+ RETURN_ON_FAILED_EXECUTION(Function);
if (i + 1 == arguments_count) continue;
- maybe_source = isolate->factory()->NewConsString(
- source_string,
- isolate->factory()->LookupSingleCharacterStringFromCode(','));
- if (!maybe_source.ToHandle(&source_string)) {
- return Local<Function>();
- }
- }
- i::Handle<i::String> brackets =
- Utils::OpenHandle(*v8::String::NewFromUtf8(v8_isolate, "){"));
- i::MaybeHandle<i::String> maybe_source =
- isolate->factory()->NewConsString(source_string, brackets);
- if (!maybe_source.ToHandle(&source_string)) {
- return Local<Function>();
+ has_pending_exception =
+ !factory->NewConsString(source_string,
+ factory->LookupSingleCharacterStringFromCode(
+ ',')).ToHandle(&source_string);
+ RETURN_ON_FAILED_EXECUTION(Function);
}
+ auto brackets = factory->NewStringFromStaticChars("){");
+ has_pending_exception = !factory->NewConsString(source_string, brackets)
+ .ToHandle(&source_string);
+ RETURN_ON_FAILED_EXECUTION(Function);
} else {
- source_string =
- Utils::OpenHandle(*v8::String::NewFromUtf8(v8_isolate, "(function(){"));
+ source_string = factory->NewStringFromStaticChars("(function(){");
}
int scope_position = source_string->length();
- i::MaybeHandle<i::String> maybe_source = isolate->factory()->NewConsString(
- source_string, Utils::OpenHandle(*source->source_string));
- if (!maybe_source.ToHandle(&source_string)) {
- return Local<Function>();
- }
+ has_pending_exception =
+ !factory->NewConsString(source_string,
+ Utils::OpenHandle(*source->source_string))
+ .ToHandle(&source_string);
+ RETURN_ON_FAILED_EXECUTION(Function);
// Include \n in case the source contains a line end comment.
- i::Handle<i::String> brackets =
- Utils::OpenHandle(*v8::String::NewFromUtf8(v8_isolate, "\n})"));
- maybe_source = isolate->factory()->NewConsString(source_string, brackets);
- if (!maybe_source.ToHandle(&source_string)) {
- return Local<Function>();
- }
+ auto brackets = factory->NewStringFromStaticChars("\n})");
+ has_pending_exception =
+ !factory->NewConsString(source_string, brackets).ToHandle(&source_string);
+ RETURN_ON_FAILED_EXECUTION(Function);
i::Handle<i::Context> context = Utils::OpenHandle(*v8_context);
i::Handle<i::SharedFunctionInfo> outer_info(context->closure()->shared(),
@@ -1722,23 +1907,34 @@ Local<Function> ScriptCompiler::CompileFunctionInContext(
i::Handle<i::JSObject> extension =
Utils::OpenHandle(*context_extensions[i]);
i::Handle<i::JSFunction> closure(context->closure(), isolate);
- context = isolate->factory()->NewWithContext(closure, context, extension);
+ context = factory->NewWithContext(closure, context, extension);
}
- EXCEPTION_PREAMBLE(isolate);
- i::MaybeHandle<i::JSFunction> maybe_fun = i::Compiler::GetFunctionFromEval(
- source_string, outer_info, context, i::SLOPPY,
- i::ONLY_SINGLE_FUNCTION_LITERAL, scope_position);
i::Handle<i::JSFunction> fun;
- has_pending_exception = !maybe_fun.ToHandle(&fun);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Function>());
+ has_pending_exception =
+ !i::Compiler::GetFunctionFromEval(
+ source_string, outer_info, context, i::SLOPPY,
+ i::ONLY_SINGLE_FUNCTION_LITERAL, scope_position).ToHandle(&fun);
+ RETURN_ON_FAILED_EXECUTION(Function);
- i::MaybeHandle<i::Object> result = i::Execution::Call(
- isolate, fun, Utils::OpenHandle(*v8_context->Global()), 0, NULL);
- i::Handle<i::Object> final_result;
- has_pending_exception = !result.ToHandle(&final_result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Function>());
- return Utils::ToLocal(i::Handle<i::JSFunction>::cast(final_result));
+ i::Handle<i::Object> result;
+ has_pending_exception =
+ !i::Execution::Call(isolate, fun,
+ Utils::OpenHandle(*v8_context->Global()), 0,
+ nullptr).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Function);
+ RETURN_ESCAPED(Utils::ToLocal(i::Handle<i::JSFunction>::cast(result)));
+}
+
+
+Local<Function> ScriptCompiler::CompileFunctionInContext(
+ Isolate* v8_isolate, Source* source, Local<Context> v8_context,
+ size_t arguments_count, Local<String> arguments[],
+ size_t context_extension_count, Local<Object> context_extensions[]) {
+ RETURN_TO_LOCAL_UNCHECKED(
+ CompileFunctionInContext(v8_context, source, arguments_count, arguments,
+ context_extension_count, context_extensions),
+ Function);
}
@@ -1750,17 +1946,13 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
}
-Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
- StreamedSource* v8_source,
- Handle<String> full_source_string,
- const ScriptOrigin& origin) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
+ StreamedSource* v8_source,
+ Handle<String> full_source_string,
+ const ScriptOrigin& origin) {
+ PREPARE_FOR_EXECUTION(context, "v8::ScriptCompiler::Compile()", Script);
i::StreamedSource* source = v8_source->impl();
- ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()", return Local<Script>());
- LOG_API(isolate, "ScriptCompiler::Compile()");
- ENTER_V8(isolate);
- i::SharedFunctionInfo* raw_result = NULL;
-
+ i::SharedFunctionInfo* raw_result = nullptr;
{
i::HandleScope scope(isolate);
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
@@ -1784,38 +1976,50 @@ Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
script->set_is_embedder_debug_script(
origin.ResourceIsEmbedderDebugScript()->IsTrue());
}
- source->info->set_script(script);
- source->info->SetContext(isolate->native_context());
+ if (!origin.SourceMapUrl().IsEmpty()) {
+ script->set_source_mapping_url(
+ *Utils::OpenHandle(*(origin.SourceMapUrl())));
+ }
- EXCEPTION_PREAMBLE(isolate);
+ source->info->set_script(script);
+ source->info->set_context(isolate->native_context());
// Do the parsing tasks which need to be done on the main thread. This will
// also handle parse errors.
- source->parser->Internalize(source->info.get());
- source->parser->HandleSourceURLComments(source->info.get());
+ source->parser->Internalize(isolate, script,
+ source->info->function() == nullptr);
+ source->parser->HandleSourceURLComments(isolate, script);
- i::Handle<i::SharedFunctionInfo> result =
- i::Handle<i::SharedFunctionInfo>::null();
- if (source->info->function() != NULL) {
+ i::Handle<i::SharedFunctionInfo> result;
+ if (source->info->function() != nullptr) {
// Parsing has succeeded.
- result =
- i::Compiler::CompileStreamedScript(source->info.get(), str->length());
+ result = i::Compiler::CompileStreamedScript(script, source->info.get(),
+ str->length());
}
has_pending_exception = result.is_null();
if (has_pending_exception) isolate->ReportPendingMessages();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+ RETURN_ON_FAILED_EXECUTION(Script);
+
+ source->info->clear_script(); // because script goes out of scope.
+ raw_result = *result; // TODO(titzer): use CloseAndEscape?
+ }
- raw_result = *result;
- // The Handle<Script> will go out of scope soon; make sure CompilationInfo
- // doesn't point to it.
- source->info->set_script(i::Handle<i::Script>());
- } // HandleScope goes out of scope.
i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
- if (generic.IsEmpty()) {
- return Local<Script>();
- }
- return generic->BindToCurrentContext();
+ if (generic.IsEmpty()) return Local<Script>();
+ Local<Script> bound = generic->BindToCurrentContext();
+ if (bound.IsEmpty()) return Local<Script>();
+ RETURN_ESCAPED(bound);
+}
+
+
+Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
+ StreamedSource* v8_source,
+ Handle<String> full_source_string,
+ const ScriptOrigin& origin) {
+ auto context = v8_isolate->GetCurrentContext();
+ RETURN_TO_LOCAL_UNCHECKED(
+ Compile(context, v8_source, full_source_string, origin), Script);
}
@@ -1826,19 +2030,23 @@ uint32_t ScriptCompiler::CachedDataVersionTag() {
}
-Local<Script> Script::Compile(v8::Handle<String> source,
- v8::ScriptOrigin* origin) {
- i::Handle<i::String> str = Utils::OpenHandle(*source);
+MaybeLocal<Script> Script::Compile(Local<Context> context,
+ Handle<String> source,
+ ScriptOrigin* origin) {
if (origin) {
ScriptCompiler::Source script_source(source, *origin);
- return ScriptCompiler::Compile(
- reinterpret_cast<v8::Isolate*>(str->GetIsolate()),
- &script_source);
+ return ScriptCompiler::Compile(context, &script_source);
}
ScriptCompiler::Source script_source(source);
- return ScriptCompiler::Compile(
- reinterpret_cast<v8::Isolate*>(str->GetIsolate()),
- &script_source);
+ return ScriptCompiler::Compile(context, &script_source);
+}
+
+
+Local<Script> Script::Compile(v8::Handle<String> source,
+ v8::ScriptOrigin* origin) {
+ auto str = Utils::OpenHandle(*source);
+ auto context = ContextFromHeapObject(str);
+ RETURN_TO_LOCAL_UNCHECKED(Compile(context, source, origin), Script);
}
@@ -1894,7 +2102,7 @@ v8::TryCatch::~TryCatch() {
if (HasCaught() && capture_message_) {
// If an exception was caught and rethrow_ is indicated, the saved
// message, script, and location need to be restored to Isolate TLS
- // for reuse. capture_message_ needs to be disabled so that DoThrow()
+ // for reuse. capture_message_ needs to be disabled so that Throw()
// does not create a new message.
isolate_->thread_local_top()->rethrowing_message_ = true;
isolate_->RestorePendingMessageFromTryCatch(this);
@@ -1949,28 +2157,28 @@ v8::Local<Value> v8::TryCatch::Exception() const {
}
+MaybeLocal<Value> v8::TryCatch::StackTrace(Local<Context> context) const {
+ if (!HasCaught()) return v8::Local<Value>();
+ i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
+ if (!raw_obj->IsJSObject()) return v8::Local<Value>();
+ PREPARE_FOR_EXECUTION(context, "v8::TryCatch::StackTrace", Value);
+ i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
+ i::Handle<i::String> name = isolate->factory()->stack_string();
+ Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
+ has_pending_exception = !maybe.IsJust();
+ RETURN_ON_FAILED_EXECUTION(Value);
+ if (!maybe.FromJust()) return v8::Local<Value>();
+ Local<Value> result;
+ has_pending_exception =
+ !ToLocal<Value>(i::Object::GetProperty(obj, name), &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+
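Sketch of the maybe version in use, with the TryCatch constructed from the isolate as the deprecation above directs (isolate and context assumed in scope):

    v8::TryCatch try_catch(isolate);
    // ... run code that may throw ...
    if (try_catch.HasCaught()) {
      v8::Local<v8::Value> stack;
      if (try_catch.StackTrace(context).ToLocal(&stack)) {
        // stack is the thrown object's .stack property; ToLocal also fails
        // when the exception is not a JS object or has no such property.
      }
    }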
v8::Local<Value> v8::TryCatch::StackTrace() const {
- if (HasCaught()) {
- i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
- if (!raw_obj->IsJSObject()) return v8::Local<Value>();
- i::HandleScope scope(isolate_);
- i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
- i::Handle<i::String> name = isolate_->factory()->stack_string();
- {
- EXCEPTION_PREAMBLE(isolate_);
- Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
- has_pending_exception = !maybe.has_value;
- EXCEPTION_BAILOUT_CHECK(isolate_, v8::Local<Value>());
- if (!maybe.value) return v8::Local<Value>();
- }
- i::Handle<i::Object> value;
- EXCEPTION_PREAMBLE(isolate_);
- has_pending_exception = !i::Object::GetProperty(obj, name).ToHandle(&value);
- EXCEPTION_BAILOUT_CHECK(isolate_, v8::Local<Value>());
- return v8::Utils::ToLocal(scope.CloseAndEscape(value));
- } else {
- return v8::Local<Value>();
- }
+ auto context = reinterpret_cast<v8::Isolate*>(isolate_)->GetCurrentContext();
+ RETURN_TO_LOCAL_UNCHECKED(StackTrace(context), Value);
}
@@ -2000,9 +2208,6 @@ void v8::TryCatch::ResetInternal() {
i::Object* the_hole = isolate_->heap()->the_hole_value();
exception_ = the_hole;
message_obj_ = the_hole;
- message_script_ = the_hole;
- message_start_pos_ = 0;
- message_end_pos_ = 0;
}
@@ -2021,7 +2226,6 @@ void v8::TryCatch::SetCaptureMessage(bool value) {
Local<String> Message::Get() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>());
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -2033,24 +2237,11 @@ Local<String> Message::Get() const {
ScriptOrigin Message::GetScriptOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::Object> script_wraper =
- i::Handle<i::Object>(message->script(), isolate);
- i::Handle<i::JSValue> script_value =
- i::Handle<i::JSValue>::cast(script_wraper);
+ auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
+ auto script_wraper = i::Handle<i::Object>(message->script(), isolate);
+ auto script_value = i::Handle<i::JSValue>::cast(script_wraper);
i::Handle<i::Script> script(i::Script::cast(script_value->value()));
- i::Handle<i::Object> scriptName(i::Script::GetNameOrSourceURL(script));
- v8::Isolate* v8_isolate =
- reinterpret_cast<v8::Isolate*>(script->GetIsolate());
- v8::ScriptOrigin origin(
- Utils::ToLocal(scriptName),
- v8::Integer::New(v8_isolate, script->line_offset()->value()),
- v8::Integer::New(v8_isolate, script->column_offset()->value()),
- v8::Boolean::New(v8_isolate, script->is_shared_cross_origin()),
- v8::Integer::New(v8_isolate, script->id()->value()),
- v8::Boolean::New(v8_isolate, script->is_embedder_debug_script()));
- return origin;
+ return GetScriptOriginForScript(isolate, script);
}
@@ -2063,12 +2254,10 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
+ auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>();
- i::Handle<i::JSArray> stackTrace =
- i::Handle<i::JSArray>::cast(stackFramesObj);
+ auto stackTrace = i::Handle<i::JSArray>::cast(stackFramesObj);
return scope.Escape(Utils::StackTraceToLocal(stackTrace));
}
@@ -2092,107 +2281,102 @@ MUST_USE_RESULT static i::MaybeHandle<i::Object> CallV8HeapFunction(
}
-int Message::GetLineNumber() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::GetLineNumber()", return kNoLineNumberInfo);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
-
- EXCEPTION_PREAMBLE(isolate);
+Maybe<int> Message::GetLineNumber(Local<Context> context) const {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetLineNumber()", int);
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "GetLineNumber", Utils::OpenHandle(this))
.ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- return static_cast<int>(result->Number());
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
+ return Just(static_cast<int>(result->Number()));
+}
+
+
+int Message::GetLineNumber() const {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return GetLineNumber(context).FromMaybe(0);
}
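
The checked GetLineNumber() reports failure as Nothing<int>() instead of the old 0 sentinel, so the caller picks the fallback. A sketch (helper name and the -1 default are ours, not V8's; entered context assumed):

    #include <v8.h>
    #include <cstdio>

    void LogThrowSite(v8::Local<v8::Context> context,
                      v8::Local<v8::Message> message) {
      // FromMaybe() substitutes our fallback if line-number lookup threw.
      int line = message->GetLineNumber(context).FromMaybe(-1);
      std::fprintf(stderr, "thrown at line %d\n", line);
    }
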
int Message::GetStartPosition() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- return message->start_position();
+ auto self = Utils::OpenHandle(this);
+ return self->start_position();
}
int Message::GetEndPosition() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- return message->end_position();
+ auto self = Utils::OpenHandle(this);
+ return self->end_position();
+}
+
+
+Maybe<int> Message::GetStartColumn(Local<Context> context) const {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetStartColumn()",
+ int);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> start_col_obj;
+ has_pending_exception = !CallV8HeapFunction(isolate, "GetPositionInLine",
+ self).ToHandle(&start_col_obj);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
+ return Just(static_cast<int>(start_col_obj->Number()));
}
int Message::GetStartColumn() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::GetStartColumn()", return kNoColumnInfo);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ const int default_value = kNoColumnInfo;
+ return GetStartColumn(context).FromMaybe(default_value);
+}
+
+
+Maybe<int> Message::GetEndColumn(Local<Context> context) const {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Message::GetEndColumn()", int);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::Object> start_col_obj;
- has_pending_exception =
- !CallV8HeapFunction(isolate, "GetPositionInLine", data_obj)
- .ToHandle(&start_col_obj);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- return static_cast<int>(start_col_obj->Number());
+ has_pending_exception = !CallV8HeapFunction(isolate, "GetPositionInLine",
+ self).ToHandle(&start_col_obj);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int);
+ int start = self->start_position();
+ int end = self->end_position();
+ return Just(static_cast<int>(start_col_obj->Number()) + (end - start));
}
int Message::GetEndColumn() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::GetEndColumn()", return kNoColumnInfo);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> start_col_obj;
- has_pending_exception =
- !CallV8HeapFunction(isolate, "GetPositionInLine", data_obj)
- .ToHandle(&start_col_obj);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(data_obj);
- int start = message->start_position();
- int end = message->end_position();
- return static_cast<int>(start_col_obj->Number()) + (end - start);
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ const int default_value = kNoColumnInfo;
+ return GetEndColumn(context).FromMaybe(default_value);
}
bool Message::IsSharedCrossOrigin() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSMessageObject> message =
- i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
- i::Handle<i::JSValue> script =
- i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
- isolate));
+ auto self = Utils::OpenHandle(this);
+ auto script = i::Handle<i::JSValue>::cast(
+ i::Handle<i::Object>(self->script(), isolate));
return i::Script::cast(script->value())->is_shared_cross_origin();
}
-Local<String> Message::GetSourceLine() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>());
- ENTER_V8(isolate);
- EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- EXCEPTION_PREAMBLE(isolate);
+MaybeLocal<String> Message::GetSourceLine(Local<Context> context) const {
+ PREPARE_FOR_EXECUTION(context, "v8::Message::GetSourceLine()", String);
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "GetSourceLine", Utils::OpenHandle(this))
.ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::String>());
+ RETURN_ON_FAILED_EXECUTION(String);
+ Local<String> str;
if (result->IsString()) {
- return scope.Escape(Utils::ToLocal(i::Handle<i::String>::cast(result)));
- } else {
- return Local<String>();
+ str = Utils::ToLocal(i::Handle<i::String>::cast(result));
}
+ RETURN_ESCAPED(str);
+}
+
+
+Local<String> Message::GetSourceLine() const {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+  RETURN_TO_LOCAL_UNCHECKED(GetSourceLine(context), String);
}
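
MaybeLocal results such as GetSourceLine() unwrap through ToLocal(). A sketch (helper name is ours; String::Utf8Value predates this patch; entered context assumed):

    #include <v8.h>
    #include <cstdio>

    void PrintSourceLine(v8::Local<v8::Context> context,
                         v8::Local<v8::Message> message) {
      v8::Local<v8::String> line;
      if (!message->GetSourceLine(context).ToLocal(&line)) return;  // threw
      v8::String::Utf8Value utf8(line);
      std::printf("%s\n", *utf8 ? *utf8 : "(unprintable)");
    }
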
@@ -2209,24 +2393,19 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- i::Handle<i::Object> obj =
- i::Object::GetElement(isolate, self, index).ToHandleChecked();
- i::Handle<i::JSObject> jsobj = i::Handle<i::JSObject>::cast(obj);
+ auto self = Utils::OpenHandle(this);
+ auto obj = i::Object::GetElement(isolate, self, index).ToHandleChecked();
+ auto jsobj = i::Handle<i::JSObject>::cast(obj);
return scope.Escape(Utils::StackFrameToLocal(jsobj));
}
int StackTrace::GetFrameCount() const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
}
Local<Array> StackTrace::AsArray() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
return Utils::ToLocal(Utils::OpenHandle(this));
}
@@ -2419,21 +2598,25 @@ bool NativeWeakMap::Delete(Handle<Value> v8_key) {
// --- J S O N ---
-Local<Value> JSON::Parse(Local<String> json_string) {
+MaybeLocal<Value> JSON::Parse(Isolate* v8_isolate, Local<String> json_string) {
+ auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, "JSON::Parse", Value);
i::Handle<i::String> string = Utils::OpenHandle(*json_string);
- i::Isolate* isolate = string->GetIsolate();
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
i::Handle<i::String> source = i::String::Flatten(string);
- EXCEPTION_PREAMBLE(isolate);
- i::MaybeHandle<i::Object> maybe_result =
- source->IsSeqOneByteString() ? i::JsonParser<true>::Parse(source)
- : i::JsonParser<false>::Parse(source);
- i::Handle<i::Object> result;
- has_pending_exception = !maybe_result.ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
- return Utils::ToLocal(
- i::Handle<i::Object>::cast(scope.CloseAndEscape(result)));
+ auto maybe = source->IsSeqOneByteString()
+ ? i::JsonParser<true>::Parse(source)
+ : i::JsonParser<false>::Parse(source);
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(maybe, &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<Value> JSON::Parse(Local<String> json_string) {
+ auto isolate = reinterpret_cast<v8::Isolate*>(
+ Utils::OpenHandle(*json_string)->GetIsolate());
+ RETURN_TO_LOCAL_UNCHECKED(Parse(isolate, json_string), Value);
}
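
The new JSON::Parse() takes the isolate explicitly instead of deriving it from the string; on a syntax error the exception is left pending and the returned MaybeLocal is empty. Sketch (helper name is ours; entered context assumed):

    #include <v8.h>

    v8::Local<v8::Value> ParseOrNull(v8::Isolate* isolate,
                                     v8::Local<v8::String> json) {
      v8::TryCatch try_catch(isolate);
      v8::Local<v8::Value> parsed;
      if (v8::JSON::Parse(isolate, json).ToLocal(&parsed)) return parsed;
      return v8::Null(isolate);  // SyntaxError swallowed by the TryCatch
    }
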
@@ -2591,7 +2774,7 @@ bool Value::IsUint32() const {
static bool CheckConstructor(i::Isolate* isolate,
i::Handle<i::JSObject> obj,
const char* class_name) {
- i::Handle<i::Object> constr(obj->map()->constructor(), isolate);
+ i::Handle<i::Object> constr(obj->map()->GetConstructor(), isolate);
if (!constr->IsJSFunction()) return false;
i::Handle<i::JSFunction> func = i::Handle<i::JSFunction>::cast(constr);
return func->shared()->native() && constr.is_identical_to(
@@ -2648,108 +2831,137 @@ bool Value::IsSetIterator() const {
}
-Local<String> Value::ToString(Isolate* v8_isolate) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> str;
- if (obj->IsString()) {
- str = obj;
- } else {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "ToString");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToString(
- isolate, obj).ToHandle(&str);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
- }
- return ToApiHandle<String>(str);
+MaybeLocal<String> Value::ToString(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsString()) return ToApiHandle<String>(obj);
+ PREPARE_FOR_EXECUTION(context, "ToString", String);
+ Local<String> result;
+ has_pending_exception =
+ !ToLocal<String>(i::Execution::ToString(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(String);
+ RETURN_ESCAPED(result);
}
-Local<String> Value::ToDetailString(Isolate* v8_isolate) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> str;
- if (obj->IsString()) {
- str = obj;
- } else {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "ToDetailString");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToDetailString(
- isolate, obj).ToHandle(&str);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
- }
- return ToApiHandle<String>(str);
+Local<String> Value::ToString(Isolate* isolate) const {
+ RETURN_TO_LOCAL_UNCHECKED(ToString(isolate->GetCurrentContext()), String);
}
-Local<v8::Object> Value::ToObject(Isolate* v8_isolate) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> val;
- if (obj->IsJSObject()) {
- val = obj;
- } else {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "ToObject");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToObject(
- isolate, obj).ToHandle(&val);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- }
- return ToApiHandle<Object>(val);
+MaybeLocal<String> Value::ToDetailString(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsString()) return ToApiHandle<String>(obj);
+ PREPARE_FOR_EXECUTION(context, "ToDetailString", String);
+ Local<String> result;
+ has_pending_exception =
+ !ToLocal<String>(i::Execution::ToDetailString(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(String);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<String> Value::ToDetailString(Isolate* isolate) const {
+ RETURN_TO_LOCAL_UNCHECKED(ToDetailString(isolate->GetCurrentContext()),
+ String);
+}
+
+
+MaybeLocal<Object> Value::ToObject(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsJSObject()) return ToApiHandle<Object>(obj);
+ PREPARE_FOR_EXECUTION(context, "ToObject", Object);
+ Local<Object> result;
+ has_pending_exception =
+ !ToLocal<Object>(i::Execution::ToObject(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(Object);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<v8::Object> Value::ToObject(Isolate* isolate) const {
+ RETURN_TO_LOCAL_UNCHECKED(ToObject(isolate->GetCurrentContext()), Object);
+}
+
+
+MaybeLocal<Boolean> Value::ToBoolean(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsBoolean()) return ToApiHandle<Boolean>(obj);
+ auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
+ auto val = isolate->factory()->ToBoolean(obj->BooleanValue());
+ return ToApiHandle<Boolean>(val);
}
Local<Boolean> Value::ToBoolean(Isolate* v8_isolate) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsBoolean()) {
- return ToApiHandle<Boolean>(obj);
- } else {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "ToBoolean");
- ENTER_V8(isolate);
- i::Handle<i::Object> val =
- isolate->factory()->ToBoolean(obj->BooleanValue());
- return ToApiHandle<Boolean>(val);
- }
+ return ToBoolean(v8_isolate->GetCurrentContext()).ToLocalChecked();
}
-Local<Number> Value::ToNumber(Isolate* v8_isolate) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsNumber()) {
- num = obj;
- } else {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "ToNumber");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToNumber(
- isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Number>());
- }
- return ToApiHandle<Number>(num);
+MaybeLocal<Number> Value::ToNumber(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsNumber()) return ToApiHandle<Number>(obj);
+ PREPARE_FOR_EXECUTION(context, "ToNumber", Number);
+ Local<Number> result;
+ has_pending_exception =
+ !ToLocal<Number>(i::Execution::ToNumber(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(Number);
+ RETURN_ESCAPED(result);
}
-Local<Integer> Value::ToInteger(Isolate* v8_isolate) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "ToInteger");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToInteger(
- isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Integer>());
- }
- return ToApiHandle<Integer>(num);
+Local<Number> Value::ToNumber(Isolate* isolate) const {
+ RETURN_TO_LOCAL_UNCHECKED(ToNumber(isolate->GetCurrentContext()), Number);
+}
+
+
+MaybeLocal<Integer> Value::ToInteger(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) return ToApiHandle<Integer>(obj);
+ PREPARE_FOR_EXECUTION(context, "ToInteger", Integer);
+ Local<Integer> result;
+ has_pending_exception =
+ !ToLocal<Integer>(i::Execution::ToInteger(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(Integer);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<Integer> Value::ToInteger(Isolate* isolate) const {
+ RETURN_TO_LOCAL_UNCHECKED(ToInteger(isolate->GetCurrentContext()), Integer);
+}
+
+
+MaybeLocal<Int32> Value::ToInt32(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) return ToApiHandle<Int32>(obj);
+ Local<Int32> result;
+ PREPARE_FOR_EXECUTION(context, "ToInt32", Int32);
+ has_pending_exception =
+ !ToLocal<Int32>(i::Execution::ToInt32(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(Int32);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<Int32> Value::ToInt32(Isolate* isolate) const {
+ RETURN_TO_LOCAL_UNCHECKED(ToInt32(isolate->GetCurrentContext()), Int32);
+}
+
+
+MaybeLocal<Uint32> Value::ToUint32(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsSmi()) return ToApiHandle<Uint32>(obj);
+ Local<Uint32> result;
+ PREPARE_FOR_EXECUTION(context, "ToUInt32", Uint32);
+ has_pending_exception =
+ !ToLocal<Uint32>(i::Execution::ToUint32(isolate, obj), &result);
+ RETURN_ON_FAILED_EXECUTION(Uint32);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<Uint32> Value::ToUint32(Isolate* isolate) const {
+ RETURN_TO_LOCAL_UNCHECKED(ToUint32(isolate->GetCurrentContext()), Uint32);
}
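
Every ToXxx() conversion above can re-enter JavaScript through valueOf or toString, which is why the checked overloads take a context. Sketch (helper name is ours; entered context assumed):

    #include <v8.h>
    #include <limits>

    double AsNumberOrNaN(v8::Local<v8::Context> context,
                         v8::Local<v8::Value> value) {
      v8::Local<v8::Number> num;
      if (!value->ToNumber(context).ToLocal(&num))
        return std::numeric_limits<double>::quiet_NaN();  // conversion threw
      return num->Value();
    }
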
@@ -2785,6 +2997,14 @@ void v8::Function::CheckCast(Value* that) {
}
+void v8::Boolean::CheckCast(v8::Value* that) {
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ Utils::ApiCheck(obj->IsBoolean(),
+ "v8::Boolean::Cast()",
+ "Could not convert to boolean");
+}
+
+
void v8::Name::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsName(),
@@ -2825,6 +3045,18 @@ void v8::Integer::CheckCast(v8::Value* that) {
}
+void v8::Int32::CheckCast(v8::Value* that) {
+ Utils::ApiCheck(that->IsInt32(), "v8::Int32::Cast()",
+ "Could not convert to 32-bit signed integer");
+}
+
+
+void v8::Uint32::CheckCast(v8::Value* that) {
+ Utils::ApiCheck(that->IsUint32(), "v8::Uint32::Cast()",
+ "Could not convert to 32-bit unsigned integer");
+}
+
+
void v8::Array::CheckCast(Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
Utils::ApiCheck(obj->IsJSArray(),
@@ -2958,100 +3190,113 @@ void v8::RegExp::CheckCast(v8::Value* that) {
}
+Maybe<bool> Value::BooleanValue(Local<Context> context) const {
+ return Just(Utils::OpenHandle(this)->BooleanValue());
+}
+
+
bool Value::BooleanValue() const {
return Utils::OpenHandle(this)->BooleanValue();
}
+Maybe<double> Value::NumberValue(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsNumber()) return Just(obj->Number());
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "NumberValue", double);
+ i::Handle<i::Object> num;
+ has_pending_exception = !i::Execution::ToNumber(isolate, obj).ToHandle(&num);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(double);
+ return Just(num->Number());
+}
+
+
double Value::NumberValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsNumber()) return obj->Number();
+ return NumberValue(ContextFromHeapObject(obj))
+ .FromMaybe(std::numeric_limits<double>::quiet_NaN());
+}
+
+
+Maybe<int64_t> Value::IntegerValue(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
i::Handle<i::Object> num;
if (obj->IsNumber()) {
num = obj;
} else {
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- LOG_API(isolate, "NumberValue");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToNumber(
- isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, std::numeric_limits<double>::quiet_NaN());
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "IntegerValue", int64_t);
+ has_pending_exception =
+ !i::Execution::ToInteger(isolate, obj).ToHandle(&num);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int64_t);
}
- return num->Number();
+ return Just(num->IsSmi() ? static_cast<int64_t>(i::Smi::cast(*num)->value())
+ : static_cast<int64_t>(num->Number()));
}
int64_t Value::IntegerValue() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> num;
+ auto obj = Utils::OpenHandle(this);
if (obj->IsNumber()) {
- num = obj;
- } else {
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- LOG_API(isolate, "IntegerValue");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToInteger(
- isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- }
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<int64_t>(num->Number());
+ if (obj->IsSmi()) {
+ return i::Smi::cast(*obj)->value();
+ } else {
+ return static_cast<int64_t>(obj->Number());
+ }
}
+ return IntegerValue(ContextFromHeapObject(obj)).FromMaybe(0);
}
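
The primitive-valued accessors follow the same shape with Maybe<T>: FromJust() aborts on a failed conversion, FromMaybe() substitutes a caller-chosen default. Sketch (helper name is ours; entered context assumed):

    #include <v8.h>
    #include <cstdint>

    int64_t SumAsIntegers(v8::Local<v8::Context> context,
                          v8::Local<v8::Value> a, v8::Local<v8::Value> b) {
      // A throwing valueOf on either operand simply contributes 0 here.
      return a->IntegerValue(context).FromMaybe(0) +
             b->IntegerValue(context).FromMaybe(0);
    }
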
-Local<Int32> Value::ToInt32(Isolate* v8_isolate) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
+Maybe<int32_t> Value::Int32Value(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsNumber()) return Just(NumberToInt32(*obj));
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Int32Value", int32_t);
i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "ToInt32");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToInt32(isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Int32>());
- }
- return ToApiHandle<Int32>(num);
+ has_pending_exception = !i::Execution::ToInt32(isolate, obj).ToHandle(&num);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(int32_t);
+ return Just(num->IsSmi() ? i::Smi::cast(*num)->value()
+ : static_cast<int32_t>(num->Number()));
}
-Local<Uint32> Value::ToUint32(Isolate* v8_isolate) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
+int32_t Value::Int32Value() const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsNumber()) return NumberToInt32(*obj);
+ return Int32Value(ContextFromHeapObject(obj)).FromMaybe(0);
+}
+
+
+Maybe<uint32_t> Value::Uint32Value(Local<Context> context) const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsNumber()) return Just(NumberToUint32(*obj));
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Uint32Value", uint32_t);
i::Handle<i::Object> num;
- if (obj->IsSmi()) {
- num = obj;
- } else {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "ToUInt32");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception = !i::Execution::ToUint32(
- isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
- }
- return ToApiHandle<Uint32>(num);
+ has_pending_exception = !i::Execution::ToUint32(isolate, obj).ToHandle(&num);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(uint32_t);
+ return Just(num->IsSmi() ? static_cast<uint32_t>(i::Smi::cast(*num)->value())
+ : static_cast<uint32_t>(num->Number()));
}
-Local<Uint32> Value::ToArrayIndex() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsSmi()) {
- if (i::Smi::cast(*obj)->value() >= 0) return Utils::Uint32ToLocal(obj);
+uint32_t Value::Uint32Value() const {
+ auto obj = Utils::OpenHandle(this);
+ if (obj->IsNumber()) return NumberToUint32(*obj);
+ return Uint32Value(ContextFromHeapObject(obj)).FromMaybe(0);
+}
+
+
+MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
+ auto self = Utils::OpenHandle(this);
+ if (self->IsSmi()) {
+ if (i::Smi::cast(*self)->value() >= 0) return Utils::Uint32ToLocal(self);
return Local<Uint32>();
}
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- LOG_API(isolate, "ToArrayIndex");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
+ PREPARE_FOR_EXECUTION(context, "ToArrayIndex", Uint32);
i::Handle<i::Object> string_obj;
- has_pending_exception = !i::Execution::ToString(
- isolate, obj).ToHandle(&string_obj);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Uint32>());
+ has_pending_exception =
+ !i::Execution::ToString(isolate, self).ToHandle(&string_obj);
+ RETURN_ON_FAILED_EXECUTION(Uint32);
i::Handle<i::String> str = i::Handle<i::String>::cast(string_obj);
uint32_t index;
if (str->AsArrayIndex(&index)) {
@@ -3061,77 +3306,65 @@ Local<Uint32> Value::ToArrayIndex() const {
} else {
value = isolate->factory()->NewNumber(index);
}
- return Utils::Uint32ToLocal(value);
+ RETURN_ESCAPED(Utils::Uint32ToLocal(value));
}
return Local<Uint32>();
}
-int32_t Value::Int32Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsNumber()) {
- return NumberToInt32(*obj);
- } else {
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- LOG_API(isolate, "Int32Value (slow)");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> num;
- has_pending_exception = !i::Execution::ToInt32(isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<int32_t>(num->Number());
- }
+Local<Uint32> Value::ToArrayIndex() const {
+ auto self = Utils::OpenHandle(this);
+ if (self->IsSmi()) {
+ if (i::Smi::cast(*self)->value() >= 0) return Utils::Uint32ToLocal(self);
+ return Local<Uint32>();
}
+ auto context = ContextFromHeapObject(self);
+ RETURN_TO_LOCAL_UNCHECKED(ToArrayIndex(context), Uint32);
}
-bool Value::Equals(Handle<Value> that) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
- i::Handle<i::Object> other = Utils::OpenHandle(*that);
- if (obj->IsSmi() && other->IsSmi()) {
- return obj->Number() == other->Number();
+Maybe<bool> Value::Equals(Local<Context> context, Handle<Value> that) const {
+ auto self = Utils::OpenHandle(this);
+ auto other = Utils::OpenHandle(*that);
+ if (self->IsSmi() && other->IsSmi()) {
+ return Just(self->Number() == other->Number());
}
- i::Object* ho = obj->IsSmi() ? *other : *obj;
- i::Isolate* isolate = i::HeapObject::cast(ho)->GetIsolate();
- if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
- "v8::Value::Equals()",
- "Reading from empty handle")) {
- return false;
- }
- LOG_API(isolate, "Equals");
- ENTER_V8(isolate);
- // If both obj and other are JSObjects, we'd better compare by identity
- // immediately when going into JS builtin. The reason is Invoke
- // would overwrite global object receiver with global proxy.
- if (obj->IsJSObject() && other->IsJSObject()) {
- return *obj == *other;
+ if (self->IsJSObject() && other->IsJSObject()) {
+ return Just(*self == *other);
}
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Value::Equals()", bool);
i::Handle<i::Object> args[] = { other };
- EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result;
has_pending_exception =
- !CallV8HeapFunction(isolate, "EQUALS", obj, arraysize(args), args)
+ !CallV8HeapFunction(isolate, "EQUALS", self, arraysize(args), args)
.ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return *result == i::Smi::FromInt(i::EQUAL);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(*result == i::Smi::FromInt(i::EQUAL));
+}
+
+
+bool Value::Equals(Handle<Value> that) const {
+ auto self = Utils::OpenHandle(this);
+ auto other = Utils::OpenHandle(*that);
+ if (self->IsSmi() && other->IsSmi()) {
+ return self->Number() == other->Number();
+ }
+ if (self->IsJSObject() && other->IsJSObject()) {
+ return *self == *other;
+ }
+ auto heap_object = self->IsSmi() ? other : self;
+ auto context = ContextFromHeapObject(heap_object);
+ return Equals(context, that).FromMaybe(false);
}
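
Abstract equality can call back into user JavaScript, so the checked Equals() reports an exception from that code as Nothing<bool>(). Sketch (helper name is ours; entered context assumed):

    #include <v8.h>

    bool LooselyEqual(v8::Local<v8::Context> context,
                      v8::Local<v8::Value> a, v8::Local<v8::Value> b) {
      // Treat "comparison threw" the same as "not equal".
      return a->Equals(context, b).FromMaybe(false);
    }
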
bool Value::StrictEquals(Handle<Value> that) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::Object> other = Utils::OpenHandle(*that);
if (obj->IsSmi()) {
return other->IsNumber() && obj->Number() == other->Number();
}
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
- "v8::Value::StrictEquals()",
- "Reading from empty handle")) {
- return false;
- }
LOG_API(isolate, "StrictEquals");
// Must check HeapNumber first, since NaN !== NaN.
if (obj->IsHeapNumber()) {
@@ -3157,89 +3390,71 @@ bool Value::StrictEquals(Handle<Value> that) const {
bool Value::SameValue(Handle<Value> that) const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
- if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
- "v8::Value::SameValue()",
- "Reading from empty handle")) {
- return false;
- }
- i::Handle<i::Object> other = Utils::OpenHandle(*that);
- return obj->SameValue(*other);
+ auto self = Utils::OpenHandle(this);
+ auto other = Utils::OpenHandle(*that);
+ return self->SameValue(*other);
}
-uint32_t Value::Uint32Value() const {
- i::Handle<i::Object> obj = Utils::OpenHandle(this);
- if (obj->IsNumber()) {
- return NumberToUint32(*obj);
- } else {
- i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
- LOG_API(isolate, "Uint32Value");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> num;
- has_pending_exception = !i::Execution::ToUint32(
- isolate, obj).ToHandle(&num);
- EXCEPTION_BAILOUT_CHECK(isolate, 0);
- if (num->IsSmi()) {
- return i::Smi::cast(*num)->value();
- } else {
- return static_cast<uint32_t>(num->Number());
- }
- }
+Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
+ v8::Local<Value> key, v8::Local<Value> value) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ auto value_obj = Utils::OpenHandle(*value);
+ has_pending_exception =
+ i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj,
+ i::SLOPPY).is_null();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
}
bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Set()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
- has_pending_exception =
- i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj,
- i::SLOPPY).is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return Set(context, key, value).FromMaybe(false);
}
-bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Set()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
+Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context, uint32_t index,
+ v8::Local<Value> value) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto value_obj = Utils::OpenHandle(*value);
has_pending_exception = i::JSObject::SetElement(
self, index, value_obj, NONE, i::SLOPPY).is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
}
-bool v8::Object::ForceSet(v8::Handle<Value> key,
- v8::Handle<Value> value,
- v8::PropertyAttribute attribs) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::ForceSet()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
- EXCEPTION_PREAMBLE(isolate);
+bool v8::Object::Set(uint32_t index, v8::Handle<Value> value) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return Set(context, index, value).FromMaybe(false);
+}
+
+
+Maybe<bool> v8::Object::ForceSet(v8::Local<v8::Context> context,
+ v8::Local<Value> key, v8::Local<Value> value,
+ v8::PropertyAttribute attribs) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Set()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ auto value_obj = Utils::OpenHandle(*value);
has_pending_exception = i::Runtime::DefineObjectProperty(
self,
key_obj,
value_obj,
static_cast<PropertyAttributes>(attribs)).is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
+}
+
+
+bool v8::Object::ForceSet(v8::Handle<Value> key, v8::Handle<Value> value,
+ v8::PropertyAttribute attribs) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return ForceSet(context, key, value, attribs).FromMaybe(false);
}
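
Usage sketch for the checked setters above: Set() and ForceSet() answer Just(true) on success and Nothing<bool>() when an interceptor or setter throws (helper name is ours; entered context assumed):

    #include <v8.h>

    bool InstallProperty(v8::Local<v8::Context> context,
                         v8::Local<v8::Object> target,
                         v8::Local<v8::Value> key,
                         v8::Local<v8::Value> value) {
      return target->Set(context, key, value).FromMaybe(false);
    }
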
@@ -3249,6 +3464,8 @@ bool v8::Object::SetPrivate(v8::Handle<Private> key, v8::Handle<Value> value) {
}
+namespace {
+
i::MaybeHandle<i::Object> DeleteObjectProperty(
i::Isolate* isolate, i::Handle<i::JSReceiver> receiver,
i::Handle<i::Object> key, i::LanguageMode language_mode) {
@@ -3286,33 +3503,42 @@ i::MaybeHandle<i::Object> DeleteObjectProperty(
return i::JSReceiver::DeleteProperty(receiver, name, language_mode);
}
+} // namespace
-Local<Value> v8::Object::Get(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE(isolate);
+
+MaybeLocal<Value> v8::Object::Get(Local<v8::Context> context,
+ Local<Value> key) {
+ PREPARE_FOR_EXECUTION(context, "v8::Object::Get()", Value);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> result;
has_pending_exception =
!i::Runtime::GetObjectProperty(isolate, self, key_obj).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(Utils::ToLocal(result));
}
-Local<Value> v8::Object::Get(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Get()", return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
+Local<Value> v8::Object::Get(v8::Handle<Value> key) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(Get(context, key), Value);
+}
+
+
+MaybeLocal<Value> v8::Object::Get(Local<Context> context, uint32_t index) {
+ PREPARE_FOR_EXECUTION(context, "v8::Object::Get()", Value);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
has_pending_exception =
!i::Object::GetElement(isolate, self, index).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(Utils::ToLocal(result));
+}
+
+
+Local<Value> v8::Object::Get(uint32_t index) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(Get(context, index), Value);
}
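
Property reads can run getters, so the checked Get() pairs naturally with an early-out on exception. Sketch (helper name is ours; entered context assumed):

    #include <v8.h>

    bool TryGetElement(v8::Local<v8::Context> context,
                       v8::Local<v8::Object> obj, uint32_t index,
                       v8::Local<v8::Value>* out) {
      // false means an exception is pending on the isolate.
      return obj->Get(context, index).ToLocal(out);
    }
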
@@ -3321,88 +3547,93 @@ Local<Value> v8::Object::GetPrivate(v8::Handle<Private> key) {
}
-PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPropertyAttributes()",
- return static_cast<PropertyAttribute>(NONE));
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+Maybe<PropertyAttribute> v8::Object::GetPropertyAttributes(
+ Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(
+ context, "v8::Object::GetPropertyAttributes()", PropertyAttribute);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
if (!key_obj->IsName()) {
- EXCEPTION_PREAMBLE(isolate);
has_pending_exception = !i::Execution::ToString(
isolate, key_obj).ToHandle(&key_obj);
- EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
}
- i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
- EXCEPTION_PREAMBLE(isolate);
- Maybe<PropertyAttributes> result =
- i::JSReceiver::GetPropertyAttributes(self, key_name);
- has_pending_exception = !result.has_value;
- EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
- if (result.value == ABSENT) return static_cast<PropertyAttribute>(NONE);
- return static_cast<PropertyAttribute>(result.value);
+ auto key_name = i::Handle<i::Name>::cast(key_obj);
+ auto result = i::JSReceiver::GetPropertyAttributes(self, key_name);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
+ if (result.FromJust() == ABSENT) {
+ return Just(static_cast<PropertyAttribute>(NONE));
+ }
+ return Just(static_cast<PropertyAttribute>(result.FromJust()));
}
-Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetOwnPropertyDescriptor()",
- return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Handle<i::Name> key_name = Utils::OpenHandle(*key);
+PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return GetPropertyAttributes(context, key)
+ .FromMaybe(static_cast<PropertyAttribute>(NONE));
+}
+
+
+MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
+ Local<String> key) {
+ PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyDescriptor()",
+ Value);
+ auto obj = Utils::OpenHandle(this);
+ auto key_name = Utils::OpenHandle(*key);
i::Handle<i::Object> args[] = { obj, key_name };
- EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> result;
has_pending_exception =
!CallV8HeapFunction(isolate, "ObjectGetOwnPropertyDescriptor",
isolate->factory()->undefined_value(),
arraysize(args), args).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(Utils::ToLocal(result));
+}
+
+
+Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<String> key) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyDescriptor(context, key), Value);
}
Local<Value> v8::Object::GetPrototype() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPrototype()", return Local<v8::Value>());
- ENTER_V8(isolate);
- i::Handle<i::Object> self = Utils::OpenHandle(this);
+ auto isolate = Utils::OpenHandle(this)->GetIsolate();
+ auto self = Utils::OpenHandle(this);
i::PrototypeIterator iter(isolate, self);
return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter));
}
-bool v8::Object::SetPrototype(Handle<Value> value) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetPrototype()", return false);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
+Maybe<bool> v8::Object::SetPrototype(Local<Context> context,
+ Local<Value> value) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetPrototype()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto value_obj = Utils::OpenHandle(*value);
// We do not allow exceptions thrown while setting the prototype
// to propagate outside.
- TryCatch try_catch;
- EXCEPTION_PREAMBLE(isolate);
- i::MaybeHandle<i::Object> result =
- i::JSObject::SetPrototype(self, value_obj, false);
+ TryCatch try_catch(reinterpret_cast<v8::Isolate*>(isolate));
+ auto result = i::JSObject::SetPrototype(self, value_obj, false);
has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return true;
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
+}
+
+
+bool v8::Object::SetPrototype(Handle<Value> value) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return SetPrototype(context, value).FromMaybe(false);
}
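
SetPrototype() keeps its internal TryCatch (now constructed with the isolate) and surfaces failure, e.g. a prototype cycle, as Nothing<bool>(). Sketch (helper name is ours; entered context assumed):

    #include <v8.h>

    bool Rebase(v8::Local<v8::Context> context, v8::Local<v8::Object> obj,
                v8::Local<v8::Value> new_proto) {
      return obj->SetPrototype(context, new_proto).FromMaybe(false);
    }
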
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Handle<FunctionTemplate> tmpl) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::FindInstanceInPrototypeChain()",
- return Local<v8::Object>());
- ENTER_V8(isolate);
+ auto isolate = Utils::OpenHandle(this)->GetIsolate();
i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this),
i::PrototypeIterator::START_AT_RECEIVER);
- i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
+ auto tmpl_info = *Utils::OpenHandle(*tmpl);
while (!tmpl_info->IsTemplateFor(iter.GetCurrent())) {
iter.Advance();
if (iter.IsAtEnd()) {
@@ -3414,59 +3645,55 @@ Local<Object> v8::Object::FindInstanceInPrototypeChain(
}
-Local<Array> v8::Object::GetPropertyNames() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetPropertyNames()",
- return Local<v8::Array>());
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
+MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
+ PREPARE_FOR_EXECUTION(context, "v8::Object::GetPropertyNames()", Array);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
has_pending_exception = !i::JSReceiver::GetKeys(
self, i::JSReceiver::INCLUDE_PROTOS).ToHandle(&value);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Array>());
+ RETURN_ON_FAILED_EXECUTION(Array);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
- i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
- i::Handle<i::JSArray> result =
- isolate->factory()->NewJSArrayWithElements(elms);
- return Utils::ToLocal(scope.CloseAndEscape(result));
+ auto elms = isolate->factory()->CopyFixedArray(value);
+ auto result = isolate->factory()->NewJSArrayWithElements(elms);
+ RETURN_ESCAPED(Utils::ToLocal(result));
}
-Local<Array> v8::Object::GetOwnPropertyNames() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetOwnPropertyNames()",
- return Local<v8::Array>());
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
+Local<Array> v8::Object::GetPropertyNames() {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(GetPropertyNames(context), Array);
+}
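
Enumeration sketch over the checked GetPropertyNames(); the cloned JSArray is an ordinary object, so element reads also go through the Maybe-based Get() (helper name is ours; entered context assumed):

    #include <v8.h>

    void VisitKeys(v8::Local<v8::Context> context, v8::Local<v8::Object> obj) {
      v8::Local<v8::Array> names;
      if (!obj->GetPropertyNames(context).ToLocal(&names)) return;
      for (uint32_t i = 0; i < names->Length(); ++i) {
        v8::Local<v8::Value> key;
        if (!names->Get(context, i).ToLocal(&key)) return;
        // ... inspect key ...
      }
    }
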
+
+
+MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context) {
+ PREPARE_FOR_EXECUTION(context, "v8::Object::GetOwnPropertyNames()", Array);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
has_pending_exception = !i::JSReceiver::GetKeys(
self, i::JSReceiver::OWN_ONLY).ToHandle(&value);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Array>());
+ RETURN_ON_FAILED_EXECUTION(Array);
// Because we use caching to speed up enumeration it is important
// to never change the result of the basic enumeration function so
// we clone the result.
- i::Handle<i::FixedArray> elms = isolate->factory()->CopyFixedArray(value);
- i::Handle<i::JSArray> result =
- isolate->factory()->NewJSArrayWithElements(elms);
- return Utils::ToLocal(scope.CloseAndEscape(result));
+ auto elms = isolate->factory()->CopyFixedArray(value);
+ auto result = isolate->factory()->NewJSArrayWithElements(elms);
+ RETURN_ESCAPED(Utils::ToLocal(result));
}
-Local<String> v8::Object::ObjectProtoToString() {
- i::Isolate* i_isolate = Utils::OpenHandle(this)->GetIsolate();
- Isolate* isolate = reinterpret_cast<Isolate*>(i_isolate);
- ON_BAILOUT(i_isolate, "v8::Object::ObjectProtoToString()",
- return Local<v8::String>());
- ENTER_V8(i_isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+Local<Array> v8::Object::GetOwnPropertyNames() {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
+}
- i::Handle<i::Object> name(self->class_name(), i_isolate);
+
+MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
+ auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
+ auto v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ i::Handle<i::Object> name(self->class_name(), isolate);
i::Handle<i::Object> tag;
// Native implementation of Object.prototype.toString (v8natives.js):
@@ -3475,82 +3702,79 @@ Local<String> v8::Object::ObjectProtoToString() {
// return "[object " + c + "]";
if (!name->IsString()) {
- return v8::String::NewFromUtf8(isolate, "[object ]");
- } else {
- i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
- if (i::String::Equals(class_name,
- i_isolate->factory()->Arguments_string())) {
- return v8::String::NewFromUtf8(isolate, "[object Object]");
- } else {
- if (internal::FLAG_harmony_tostring) {
- i::Handle<i::Symbol> toStringTag =
- Utils::OpenHandle(*Symbol::GetToStringTag(isolate));
- EXCEPTION_PREAMBLE(i_isolate);
- has_pending_exception =
- !i::Runtime::GetObjectProperty(i_isolate, self, toStringTag)
- .ToHandle(&tag);
- EXCEPTION_BAILOUT_CHECK(i_isolate, Local<v8::String>());
-
- if (tag->IsString()) {
- class_name = i::Handle<i::String>::cast(tag);
- }
- }
- const char* prefix = "[object ";
- Local<String> str = Utils::ToLocal(class_name);
- const char* postfix = "]";
+ return v8::String::NewFromUtf8(v8_isolate, "[object ]");
+ }
+ auto class_name = i::Handle<i::String>::cast(name);
+ if (i::String::Equals(class_name, isolate->factory()->Arguments_string())) {
+ return v8::String::NewFromUtf8(v8_isolate, "[object Object]");
+ }
+ if (internal::FLAG_harmony_tostring) {
+ PREPARE_FOR_EXECUTION(context, "v8::Object::ObjectProtoToString()", String);
+ auto toStringTag = isolate->factory()->to_string_tag_symbol();
+ has_pending_exception = !i::Runtime::GetObjectProperty(
+ isolate, self, toStringTag).ToHandle(&tag);
+ RETURN_ON_FAILED_EXECUTION(String);
+ if (tag->IsString()) {
+ class_name = i::Handle<i::String>::cast(tag).EscapeFrom(&handle_scope);
+ }
+ }
+ const char* prefix = "[object ";
+ Local<String> str = Utils::ToLocal(class_name);
+ const char* postfix = "]";
- int prefix_len = i::StrLength(prefix);
- int str_len = str->Utf8Length();
- int postfix_len = i::StrLength(postfix);
+ int prefix_len = i::StrLength(prefix);
+ int str_len = str->Utf8Length();
+ int postfix_len = i::StrLength(postfix);
- int buf_len = prefix_len + str_len + postfix_len;
- i::ScopedVector<char> buf(buf_len);
+ int buf_len = prefix_len + str_len + postfix_len;
+ i::ScopedVector<char> buf(buf_len);
- // Write prefix.
- char* ptr = buf.start();
- i::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
- ptr += prefix_len;
+ // Write prefix.
+ char* ptr = buf.start();
+ i::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
+ ptr += prefix_len;
- // Write real content.
- str->WriteUtf8(ptr, str_len);
- ptr += str_len;
+ // Write real content.
+ str->WriteUtf8(ptr, str_len);
+ ptr += str_len;
- // Write postfix.
- i::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
+ // Write postfix.
+ i::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
- // Copy the buffer into a heap-allocated string and return it.
- Local<String> result = v8::String::NewFromUtf8(
- isolate, buf.start(), String::kNormalString, buf_len);
- return result;
- }
- }
+ // Copy the buffer into a heap-allocated string and return it.
+ return v8::String::NewFromUtf8(v8_isolate, buf.start(), String::kNormalString,
+ buf_len);
+}
+
+
+Local<String> v8::Object::ObjectProtoToString() {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(ObjectProtoToString(context), String);
}
Local<String> v8::Object::GetConstructorName() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
- return Local<v8::String>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::String> name(self->constructor_name());
return Utils::ToLocal(name);
}
-bool v8::Object::Delete(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Delete()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE(isolate);
+Maybe<bool> v8::Object::Delete(Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Delete()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> obj;
has_pending_exception =
!DeleteObjectProperty(isolate, self, key_obj, i::SLOPPY).ToHandle(&obj);
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return obj->IsTrue();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(obj->IsTrue());
+}
+
+
+bool v8::Object::Delete(v8::Handle<Value> key) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return Delete(context, key).FromMaybe(false);
}
@@ -3559,14 +3783,11 @@ bool v8::Object::DeletePrivate(v8::Handle<Private> key) {
}
-bool v8::Object::Has(v8::Handle<Value> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Has()", return false);
- ENTER_V8(isolate);
- i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
- i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- EXCEPTION_PREAMBLE(isolate);
- Maybe<bool> maybe;
+Maybe<bool> v8::Object::Has(Local<Context> context, Local<Value> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ Maybe<bool> maybe = Nothing<bool>();
// Check if the given key is an array index.
uint32_t index;
if (key_obj->ToArrayIndex(&index)) {
@@ -3578,10 +3799,15 @@ bool v8::Object::Has(v8::Handle<Value> key) {
maybe = i::JSReceiver::HasProperty(self, name);
}
}
- if (!maybe.has_value) has_pending_exception = true;
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- DCHECK(maybe.has_value);
- return maybe.value;
+ has_pending_exception = maybe.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return maybe;
+}
+
+
+bool v8::Object::Has(v8::Handle<Value> key) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return Has(context, key).FromMaybe(false);
}
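
Has() and Delete() both route through interceptors and can therefore throw; a combined sketch (helper name is ours; entered context assumed):

    #include <v8.h>

    bool RemoveIfPresent(v8::Local<v8::Context> context,
                         v8::Local<v8::Object> obj,
                         v8::Local<v8::Value> key) {
      if (!obj->Has(context, key).FromMaybe(false)) return false;
      return obj->Delete(context, key).FromMaybe(false);
    }
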
@@ -3592,62 +3818,71 @@ bool v8::Object::HasPrivate(v8::Handle<Private> key) {
}
-bool v8::Object::Delete(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::DeleteProperty()",
- return false);
- ENTER_V8(isolate);
- HandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-
- EXCEPTION_PREAMBLE(isolate);
+Maybe<bool> v8::Object::Delete(Local<Context> context, uint32_t index) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::DeleteProperty()",
+ bool);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::Object> obj;
has_pending_exception =
!i::JSReceiver::DeleteElement(self, index).ToHandle(&obj);
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return obj->IsTrue();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(obj->IsTrue());
+}
+
+
+bool v8::Object::Delete(uint32_t index) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return Delete(context, index).FromMaybe(false);
+}
+
+
+Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::Get()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto maybe = i::JSReceiver::HasElement(self, index);
+ has_pending_exception = maybe.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return maybe;
}
bool v8::Object::Has(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- Maybe<bool> maybe = i::JSReceiver::HasElement(self, index);
- has_pending_exception = !maybe.has_value;
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return maybe.value;
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return Has(context, index).FromMaybe(false);
}
-template<typename Getter, typename Setter, typename Data>
-static inline bool ObjectSetAccessor(Object* obj,
- Handle<Name> name,
- Getter getter,
- Setter setter,
- Data data,
+template <typename Getter, typename Setter, typename Data>
+static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* obj,
+ Local<Name> name, Getter getter,
+ Setter setter, Data data,
AccessControl settings,
PropertyAttribute attributes) {
- i::Isolate* isolate = Utils::OpenHandle(obj)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::SetAccessor()", bool);
v8::Handle<AccessorSignature> signature;
- i::Handle<i::AccessorInfo> info = MakeAccessorInfo(
- name, getter, setter, data, settings, attributes, signature);
- if (info.is_null()) return false;
+ auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
+ signature);
+ if (info.is_null()) return Nothing<bool>();
bool fast = Utils::OpenHandle(obj)->HasFastProperties();
i::Handle<i::Object> result;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, result,
- i::JSObject::SetAccessor(Utils::OpenHandle(obj), info),
- false);
- if (result->IsUndefined()) return false;
+ has_pending_exception =
+ !i::JSObject::SetAccessor(Utils::OpenHandle(obj), info).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ if (result->IsUndefined()) return Nothing<bool>();
if (fast) {
i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0, "APISetAccessor");
}
- return true;
+ return Just(true);
+}
+
+
+Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
+ MaybeLocal<Value> data, AccessControl settings,
+ PropertyAttribute attribute) {
+ return ObjectSetAccessor(context, this, name, getter, setter,
+ data.FromMaybe(Local<Value>()), settings, attribute);
}
@@ -3657,8 +3892,9 @@ bool Object::SetAccessor(Handle<String> name,
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attributes) {
- return ObjectSetAccessor(
- this, name, getter, setter, data, settings, attributes);
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
+ attributes).FromMaybe(false);
}
@@ -3668,8 +3904,9 @@ bool Object::SetAccessor(Handle<Name> name,
v8::Handle<Value> data,
AccessControl settings,
PropertyAttribute attributes) {
- return ObjectSetAccessor(
- this, name, getter, setter, data, settings, attributes);
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
+ attributes).FromMaybe(false);
}
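
Sketch of the context-taking SetAccessor() added above (LengthGetter is a hypothetical embedder callback, and an empty MaybeLocal<Value>() stands for "no data argument"; entered context assumed):

    #include <v8.h>

    void LengthGetter(v8::Local<v8::Name> name,
                      const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(42);  // placeholder value
    }

    bool AddLengthAccessor(v8::Local<v8::Context> context,
                           v8::Local<v8::Object> obj,
                           v8::Local<v8::Name> name) {
      return obj->SetAccessor(context, name, LengthGetter, nullptr,
                              v8::MaybeLocal<v8::Value>(), v8::DEFAULT,
                              v8::None).FromMaybe(false);
    }
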
@@ -3681,7 +3918,6 @@ void Object::SetAccessorProperty(Local<Name> name,
// TODO(verwaest): Remove |settings|.
DCHECK_EQ(v8::DEFAULT, settings);
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetAccessorProperty()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Object> getter_i = v8::Utils::OpenHandle(*getter);
@@ -3695,116 +3931,197 @@ void Object::SetAccessorProperty(Local<Name> name,
}
+Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
+ Local<Name> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::HasOwnProperty()",
+ bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_val = Utils::OpenHandle(*key);
+ auto result = i::JSReceiver::HasOwnProperty(self, key_val);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
+}
+
+
bool v8::Object::HasOwnProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
- return false);
- EXCEPTION_PREAMBLE(isolate);
- Maybe<bool> maybe = i::JSReceiver::HasOwnProperty(Utils::OpenHandle(this),
- Utils::OpenHandle(*key));
- has_pending_exception = !maybe.has_value;
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return maybe.value;
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return HasOwnProperty(context, key).FromMaybe(false);
+}
+
+
+Maybe<bool> v8::Object::HasRealNamedProperty(Local<Context> context,
+ Local<Name> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "v8::Object::HasRealNamedProperty()",
+ bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_val = Utils::OpenHandle(*key);
+ auto result = i::JSObject::HasRealNamedProperty(self, key_val);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
}
bool v8::Object::HasRealNamedProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
- return false);
- EXCEPTION_PREAMBLE(isolate);
- Maybe<bool> maybe = i::JSObject::HasRealNamedProperty(
- Utils::OpenHandle(this), Utils::OpenHandle(*key));
- has_pending_exception = !maybe.has_value;
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return maybe.value;
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return HasRealNamedProperty(context, key).FromMaybe(false);
+}
+
+
+Maybe<bool> v8::Object::HasRealIndexedProperty(Local<Context> context,
+ uint32_t index) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context,
+ "v8::Object::HasRealIndexedProperty()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto result = i::JSObject::HasRealElementProperty(self, index);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
}
bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
- return false);
- EXCEPTION_PREAMBLE(isolate);
- Maybe<bool> maybe =
- i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index);
- has_pending_exception = !maybe.has_value;
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return maybe.value;
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return HasRealIndexedProperty(context, index).FromMaybe(false);
+}
+
+
+Maybe<bool> v8::Object::HasRealNamedCallbackProperty(Local<Context> context,
+ Local<Name> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(
+ context, "v8::Object::HasRealNamedCallbackProperty()", bool);
+ auto self = Utils::OpenHandle(this);
+ auto key_val = Utils::OpenHandle(*key);
+ auto result = i::JSObject::HasRealNamedCallbackProperty(self, key_val);
+ has_pending_exception = result.IsNothing();
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return result;
}
bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::HasRealNamedCallbackProperty()",
- return false);
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- Maybe<bool> maybe = i::JSObject::HasRealNamedCallbackProperty(
- Utils::OpenHandle(this), Utils::OpenHandle(*key));
- has_pending_exception = !maybe.has_value;
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return maybe.value;
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return HasRealNamedCallbackProperty(context, key).FromMaybe(false);
}
bool v8::Object::HasNamedLookupInterceptor() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasNamedLookupInterceptor()",
- return false);
- return Utils::OpenHandle(this)->HasNamedInterceptor();
+ auto self = Utils::OpenHandle(this);
+ return self->HasNamedInterceptor();
}
bool v8::Object::HasIndexedLookupInterceptor() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::HasIndexedLookupInterceptor()",
- return false);
- return Utils::OpenHandle(this)->HasIndexedInterceptor();
+ auto self = Utils::OpenHandle(this);
+ return self->HasIndexedInterceptor();
}
-static Local<Value> GetPropertyByLookup(i::LookupIterator* it) {
- // If the property being looked up is a callback, it can throw an exception.
- EXCEPTION_PREAMBLE(it->isolate());
- i::Handle<i::Object> result;
- has_pending_exception = !i::Object::GetProperty(it).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(it->isolate(), Local<Value>());
-
- if (it->IsFound()) return Utils::ToLocal(result);
- return Local<Value>();
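+// Each lookup site now reports "not found" and "exception thrown" alike as
+// an empty MaybeLocal<Value>, replacing the shared GetPropertyByLookup
+// helper removed above.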
+MaybeLocal<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
+ Local<Context> context, Local<Name> key) {
+ PREPARE_FOR_EXECUTION(
+ context, "v8::Object::GetRealNamedPropertyInPrototypeChain()", Value);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ i::PrototypeIterator iter(isolate, self);
+ if (iter.IsAtEnd()) return MaybeLocal<Value>();
+ auto proto = i::PrototypeIterator::GetCurrent(iter);
+ i::LookupIterator it(self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) return MaybeLocal<Value>();
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
}
Local<Value> v8::Object::GetRealNamedPropertyInPrototypeChain(
Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::GetRealNamedPropertyInPrototypeChain()",
- return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::PrototypeIterator iter(isolate, self_obj);
- if (iter.IsAtEnd()) return Local<Value>();
- i::Handle<i::Object> proto = i::PrototypeIterator::GetCurrent(iter);
- i::LookupIterator it(self_obj, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(GetRealNamedPropertyInPrototypeChain(context, key),
+ Value);
+}
+
+
+Maybe<PropertyAttribute>
+v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(
+ Local<Context> context, Local<Name> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(
+ context, "v8::Object::GetRealNamedPropertyAttributesInPrototypeChain()",
+ PropertyAttribute);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ i::PrototypeIterator iter(isolate, self);
+ if (iter.IsAtEnd()) return Nothing<PropertyAttribute>();
+ auto proto = i::PrototypeIterator::GetCurrent(iter);
+ i::LookupIterator it(self, key_obj, i::Handle<i::JSReceiver>::cast(proto),
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- return GetPropertyByLookup(&it);
+ if (!it.IsFound()) return Nothing<PropertyAttribute>();
+ auto result = i::JSReceiver::GetPropertyAttributes(&it);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
+ if (result.FromJust() == ABSENT) {
+ return Just(static_cast<PropertyAttribute>(NONE));
+ }
+ return Just<PropertyAttribute>(
+ static_cast<PropertyAttribute>(result.FromJust()));
+}
+
+
+Maybe<PropertyAttribute>
+v8::Object::GetRealNamedPropertyAttributesInPrototypeChain(Handle<String> key) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return GetRealNamedPropertyAttributesInPrototypeChain(context, key);
+}
+
+
+MaybeLocal<Value> v8::Object::GetRealNamedProperty(Local<Context> context,
+ Local<Name> key) {
+ PREPARE_FOR_EXECUTION(context, "v8::Object::GetRealNamedProperty()", Value);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ i::LookupIterator it(self, key_obj,
+ i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+ if (!it.IsFound()) return MaybeLocal<Value>();
+ Local<Value> result;
+ has_pending_exception = !ToLocal<Value>(i::Object::GetProperty(&it), &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
}
Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetRealNamedProperty()",
- return Local<Value>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- i::LookupIterator it(self_obj, key_obj,
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(GetRealNamedProperty(context, key), Value);
+}
+
+
+Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
+ Local<Context> context, Local<Name> key) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(
+ context, "v8::Object::GetRealNamedPropertyAttributes()",
+ PropertyAttribute);
+ auto self = Utils::OpenHandle(this);
+ auto key_obj = Utils::OpenHandle(*key);
+ i::LookupIterator it(self, key_obj,
i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
- return GetPropertyByLookup(&it);
+ if (!it.IsFound()) return Nothing<PropertyAttribute>();
+ auto result = i::JSReceiver::GetPropertyAttributes(&it);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(PropertyAttribute);
+ if (result.FromJust() == ABSENT) {
+ return Just(static_cast<PropertyAttribute>(NONE));
+ }
+ return Just<PropertyAttribute>(
+ static_cast<PropertyAttribute>(result.FromJust()));
+}
+
+
+Maybe<PropertyAttribute> v8::Object::GetRealNamedPropertyAttributes(
+ Handle<String> key) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ return GetRealNamedPropertyAttributes(context, key);
}
@@ -3813,7 +4130,6 @@ Local<Value> v8::Object::GetRealNamedProperty(Handle<String> key) {
// the old map of this object will fail.
void v8::Object::TurnOnAccessCheck() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::TurnOnAccessCheck()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
@@ -3830,35 +4146,26 @@ void v8::Object::TurnOnAccessCheck() {
Local<v8::Object> v8::Object::Clone() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::Clone()", return Local<Object>());
+ auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSObject> result = isolate->factory()->CopyJSObject(self);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
+ auto result = isolate->factory()->CopyJSObject(self);
+ CHECK(!result.is_null());
return Utils::ToLocal(result);
}
Local<v8::Context> v8::Object::CreationContext() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate,
- "v8::Object::CreationContext()", return Local<v8::Context>());
- ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Context* context = self->GetCreationContext();
- return Utils::ToLocal(i::Handle<i::Context>(context));
+ auto self = Utils::OpenHandle(this);
+ auto context = handle(self->GetCreationContext());
+ return Utils::ToLocal(context);
}
int v8::Object::GetIdentityHash() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetIdentityHash()", return 0);
- ENTER_V8(isolate);
+ auto isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope scope(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
return i::JSReceiver::GetOrCreateIdentityHash(self)->value();
}
@@ -3866,7 +4173,6 @@ int v8::Object::GetIdentityHash() {
bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Handle<v8::Value> value) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::SetHiddenValue()", return false);
if (value.IsEmpty()) return DeleteHiddenValue(key);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
@@ -3883,8 +4189,6 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::GetHiddenValue()",
- return Local<v8::Value>());
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
@@ -3898,7 +4202,6 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::DeleteHiddenValue()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
@@ -3947,8 +4250,8 @@ void PrepareExternalArrayElements(i::Handle<i::JSObject> object,
void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::SetElementsToPixelData()", return);
+ auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
if (!Utils::ApiCheck(length >= 0 &&
@@ -3957,7 +4260,6 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
"length exceeds max acceptable value")) {
return;
}
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (!Utils::ApiCheck(!self->IsJSArray(),
"v8::Object::SetIndexedPropertiesToPixelData()",
"JSArray is not supported")) {
@@ -3968,35 +4270,27 @@ void v8::Object::SetIndexedPropertiesToPixelData(uint8_t* data, int length) {
bool v8::Object::HasIndexedPropertiesInPixelData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::HasIndexedPropertiesInPixelData()",
- return false);
+ auto self = Utils::OpenHandle(this);
return self->HasExternalUint8ClampedElements();
}
uint8_t* v8::Object::GetIndexedPropertiesPixelData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelData()",
- return NULL);
+ auto self = Utils::OpenHandle(this);
if (self->HasExternalUint8ClampedElements()) {
return i::ExternalUint8ClampedArray::cast(self->elements())->
external_uint8_clamped_pointer();
- } else {
- return NULL;
}
+ return nullptr;
}
int v8::Object::GetIndexedPropertiesPixelDataLength() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(), "v8::GetIndexedPropertiesPixelDataLength()",
- return -1);
+ auto self = Utils::OpenHandle(this);
if (self->HasExternalUint8ClampedElements()) {
return i::ExternalUint8ClampedArray::cast(self->elements())->length();
- } else {
- return -1;
}
+ return -1;
}
@@ -4004,8 +4298,8 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
void* data,
ExternalArrayType array_type,
int length) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::SetIndexedPropertiesToExternalArrayData()", return);
+ auto self = Utils::OpenHandle(this);
+ auto isolate = self->GetIsolate();
ENTER_V8(isolate);
i::HandleScope scope(isolate);
if (!Utils::ApiCheck(length >= 0 && length <= i::ExternalArray::kMaxLength,
@@ -4013,7 +4307,6 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
"length exceeds max acceptable value")) {
return;
}
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (!Utils::ApiCheck(!self->IsJSArray(),
"v8::Object::SetIndexedPropertiesToExternalArrayData()",
"JSArray is not supported")) {
@@ -4024,32 +4317,22 @@ void v8::Object::SetIndexedPropertiesToExternalArrayData(
bool v8::Object::HasIndexedPropertiesInExternalArrayData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::HasIndexedPropertiesInExternalArrayData()",
- return false);
+ auto self = Utils::OpenHandle(this);
return self->HasExternalArrayElements();
}
void* v8::Object::GetIndexedPropertiesExternalArrayData() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayData()",
- return NULL);
+ auto self = Utils::OpenHandle(this);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->external_pointer();
- } else {
- return NULL;
}
+ return nullptr;
}
ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayDataType()",
- return static_cast<ExternalArrayType>(-1));
+ auto self = Utils::OpenHandle(this);
switch (self->elements()->map()->instance_type()) {
#define INSTANCE_TYPE_TO_ARRAY_TYPE(Type, type, TYPE, ctype, size) \
case i::EXTERNAL_##TYPE##_ARRAY_TYPE: \
@@ -4063,101 +4346,99 @@ ExternalArrayType v8::Object::GetIndexedPropertiesExternalArrayDataType() {
int v8::Object::GetIndexedPropertiesExternalArrayDataLength() {
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- ON_BAILOUT(self->GetIsolate(),
- "v8::GetIndexedPropertiesExternalArrayDataLength()",
- return 0);
+ auto self = Utils::OpenHandle(this);
if (self->HasExternalArrayElements()) {
return i::ExternalArray::cast(self->elements())->length();
- } else {
- return -1;
}
+ return -1;
}
bool v8::Object::IsCallable() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::IsCallable()", return false);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- return obj->IsCallable();
+ auto self = Utils::OpenHandle(this);
+ return self->IsCallable();
}
-Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Value> recv,
- int argc,
- v8::Handle<v8::Value> argv[]) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::CallAsFunction()",
- return Local<v8::Value>());
- LOG_API(isolate, "Object::CallAsFunction");
- ENTER_V8(isolate);
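+// The call/construct entry points switch to
+// PREPARE_FOR_EXECUTION_WITH_CALLBACK, the Maybe-style counterpart of the
+// EXCEPTION_BAILOUT_CHECK_DO_CALLBACK path used by the removed code above.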
+MaybeLocal<Value> Object::CallAsFunction(Local<Context> context,
+ Handle<Value> recv, int argc,
+ Handle<Value> argv[]) {
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Object::CallAsFunction()",
+ Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
+ auto self = Utils::OpenHandle(this);
+ auto recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>();
- if (obj->IsJSFunction()) {
- fun = i::Handle<i::JSFunction>::cast(obj);
+ i::Handle<i::JSFunction> fun;
+ if (self->IsJSFunction()) {
+ fun = i::Handle<i::JSFunction>::cast(self);
} else {
- EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> delegate;
- has_pending_exception = !i::Execution::TryGetFunctionDelegate(
- isolate, obj).ToHandle(&delegate);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+ has_pending_exception = !i::Execution::TryGetFunctionDelegate(isolate, self)
+ .ToHandle(&delegate);
+ RETURN_ON_FAILED_EXECUTION(Value);
fun = i::Handle<i::JSFunction>::cast(delegate);
- recv_obj = obj;
+ recv_obj = self;
}
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned;
- has_pending_exception = !i::Execution::Call(
- isolate, fun, recv_obj, argc, args, true).ToHandle(&returned);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Value>());
- return Utils::ToLocal(scope.CloseAndEscape(returned));
+ Local<Value> result;
+ has_pending_exception =
+ !ToLocal<Value>(
+ i::Execution::Call(isolate, fun, recv_obj, argc, args, true),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
}
-Local<v8::Value> Object::CallAsConstructor(int argc,
- v8::Handle<v8::Value> argv[]) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Object::CallAsConstructor()",
- return Local<v8::Object>());
- LOG_API(isolate, "Object::CallAsConstructor");
- ENTER_V8(isolate);
+Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Value> recv, int argc,
+ v8::Handle<v8::Value> argv[]) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ Local<Value>* argv_cast = reinterpret_cast<Local<Value>*>(argv);
+ RETURN_TO_LOCAL_UNCHECKED(CallAsFunction(context, recv, argc, argv_cast),
+ Value);
+}
+
+
+MaybeLocal<Value> Object::CallAsConstructor(Local<Context> context, int argc,
+ Local<Value> argv[]) {
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context,
+ "v8::Object::CallAsConstructor()", Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- if (obj->IsJSFunction()) {
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned;
- has_pending_exception = !i::Execution::New(
- fun, argc, args).ToHandle(&returned);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
- return Utils::ToLocal(scope.CloseAndEscape(
- i::Handle<i::JSObject>::cast(returned)));
- }
- EXCEPTION_PREAMBLE(isolate);
+ if (self->IsJSFunction()) {
+ auto fun = i::Handle<i::JSFunction>::cast(self);
+ Local<Value> result;
+ has_pending_exception =
+ !ToLocal<Value>(i::Execution::New(fun, argc, args), &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+ }
i::Handle<i::Object> delegate;
has_pending_exception = !i::Execution::TryGetConstructorDelegate(
- isolate, obj).ToHandle(&delegate);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
+ isolate, self).ToHandle(&delegate);
+ RETURN_ON_FAILED_EXECUTION(Value);
if (!delegate->IsUndefined()) {
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(delegate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned;
- has_pending_exception = !i::Execution::Call(
- isolate, fun, obj, argc, args).ToHandle(&returned);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
+ auto fun = i::Handle<i::JSFunction>::cast(delegate);
+ Local<Value> result;
+ has_pending_exception =
+ !ToLocal<Value>(i::Execution::Call(isolate, fun, self, argc, args),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
DCHECK(!delegate->IsUndefined());
- return Utils::ToLocal(scope.CloseAndEscape(returned));
+ RETURN_ESCAPED(result);
}
- return Local<v8::Object>();
+ return MaybeLocal<Value>();
+}
+
+
+Local<v8::Value> Object::CallAsConstructor(int argc,
+ v8::Handle<v8::Value> argv[]) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ Local<Value>* argv_cast = reinterpret_cast<Local<Value>*>(argv);
+ RETURN_TO_LOCAL_UNCHECKED(CallAsConstructor(context, argc, argv_cast), Value);
}
@@ -4179,52 +4460,56 @@ Local<v8::Object> Function::NewInstance() const {
}
-Local<v8::Object> Function::NewInstance(int argc,
- v8::Handle<v8::Value> argv[]) const {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Function::NewInstance()",
- return Local<v8::Object>());
- LOG_API(isolate, "Function::NewInstance");
- ENTER_V8(isolate);
+MaybeLocal<Object> Function::NewInstance(Local<Context> context, int argc,
+ v8::Handle<v8::Value> argv[]) const {
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::NewInstance()",
+ Object);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned;
- has_pending_exception = !i::Execution::New(
- function, argc, args).ToHandle(&returned);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
- return scope.Escape(Utils::ToLocal(i::Handle<i::JSObject>::cast(returned)));
+ Local<Object> result;
+ has_pending_exception =
+ !ToLocal<Object>(i::Execution::New(self, argc, args), &result);
+ RETURN_ON_FAILED_EXECUTION(Object);
+ RETURN_ESCAPED(result);
}
-Local<v8::Value> Function::Call(v8::Handle<v8::Value> recv, int argc,
- v8::Handle<v8::Value> argv[]) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
- LOG_API(isolate, "Function::Call");
- ENTER_V8(isolate);
+Local<v8::Object> Function::NewInstance(int argc,
+ v8::Handle<v8::Value> argv[]) const {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(NewInstance(context, argc, argv), Object);
+}
+
+
+MaybeLocal<v8::Value> Function::Call(Local<Context> context,
+ v8::Handle<v8::Value> recv, int argc,
+ v8::Handle<v8::Value> argv[]) {
+ PREPARE_FOR_EXECUTION_WITH_CALLBACK(context, "v8::Function::Call()", Value);
i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
i::Handle<i::Object>* args = reinterpret_cast<i::Handle<i::Object>*>(argv);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> returned;
- has_pending_exception = !i::Execution::Call(
- isolate, fun, recv_obj, argc, args, true).ToHandle(&returned);
- EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>());
- return Utils::ToLocal(scope.CloseAndEscape(returned));
+ Local<Value> result;
+ has_pending_exception =
+ !ToLocal<Value>(
+ i::Execution::Call(isolate, self, recv_obj, argc, args, true),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<v8::Value> Function::Call(v8::Handle<v8::Value> recv, int argc,
+ v8::Handle<v8::Value> argv[]) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(Call(context, recv, argc, argv), Value);
}
void Function::SetName(v8::Handle<v8::String> name) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
- USE(isolate);
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
func->shared()->set_name(*Utils::OpenHandle(*name));
}
@@ -4246,22 +4531,16 @@ Handle<Value> Function::GetInferredName() const {
Handle<Value> Function::GetDisplayName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Function::GetDisplayName()",
- return ToApiHandle<Primitive>(
- isolate->factory()->undefined_value()));
ENTER_V8(isolate);
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
i::Handle<i::String> property_name =
- isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("displayName"));
-
+ isolate->factory()->NewStringFromStaticChars("displayName");
i::Handle<i::Object> value =
i::JSObject::GetDataProperty(func, property_name);
if (value->IsString()) {
i::Handle<i::String> name = i::Handle<i::String>::cast(value);
if (name->length() > 0) return Utils::ToLocal(name);
}
-
return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
}
@@ -4270,16 +4549,7 @@ ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- i::Handle<i::Object> scriptName = i::Script::GetNameOrSourceURL(script);
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(func->GetIsolate());
- v8::ScriptOrigin origin(
- Utils::ToLocal(scriptName),
- v8::Integer::New(isolate, script->line_offset()->value()),
- v8::Integer::New(isolate, script->column_offset()->value()),
- v8::Boolean::New(isolate, script->is_shared_cross_origin()),
- v8::Integer::New(isolate, script->id()->value()),
- v8::Boolean::New(isolate, script->is_embedder_debug_script()));
- return origin;
+ return GetScriptOriginForScript(func->GetIsolate(), script);
}
return v8::ScriptOrigin(Handle<Value>());
}
@@ -4339,11 +4609,7 @@ Local<v8::Value> Function::GetBoundFunction() const {
int Name::GetIdentityHash() {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Name::GetIdentityHash()", return 0);
- ENTER_V8(isolate);
- i::HandleScope scope(isolate);
- i::Handle<i::Name> self = Utils::OpenHandle(this);
+ auto self = Utils::OpenHandle(this);
return static_cast<int>(self->Hash());
}
@@ -5169,6 +5435,9 @@ void v8::V8::ShutdownPlatform() {
bool v8::V8::Initialize() {
i::V8::Initialize();
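+ // With V8_USE_EXTERNAL_STARTUP_DATA, the natives blob is read here and
+ // released again in v8::V8::Dispose() below.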
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+ i::ReadNatives();
+#endif
return true;
}
@@ -5195,6 +5464,9 @@ void v8::V8::SetArrayBufferAllocator(
bool v8::V8::Dispose() {
i::V8::TearDown();
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+ i::DisposeNatives();
+#endif
return true;
}
@@ -5289,7 +5561,6 @@ Local<Context> v8::Context::New(
v8::Handle<Value> global_object) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
LOG_API(isolate, "Context::New");
- ON_BAILOUT(isolate, "v8::Context::New()", return Local<Context>());
i::HandleScope scope(isolate);
ExtensionConfiguration no_extensions;
if (extensions == NULL) extensions = &no_extensions;
@@ -5302,8 +5573,6 @@ Local<Context> v8::Context::New(
void v8::Context::SetSecurityToken(Handle<Value> token) {
i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Isolate* isolate = env->GetIsolate();
- ENTER_V8(isolate);
i::Handle<i::Object> token_handle = Utils::OpenHandle(*token);
env->set_security_token(*token_handle);
}
@@ -5311,8 +5580,6 @@ void v8::Context::SetSecurityToken(Handle<Value> token) {
void v8::Context::UseDefaultSecurityToken() {
i::Handle<i::Context> env = Utils::OpenHandle(this);
- i::Isolate* isolate = env->GetIsolate();
- ENTER_V8(isolate);
env->set_security_token(env->global_object());
}
@@ -5377,44 +5644,45 @@ void Context::SetErrorMessageForCodeGenerationFromStrings(
}
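+// Template instantiation also gains MaybeLocal overloads; the unchecked
+// variants below recover a context from the template's own heap object.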
+MaybeLocal<v8::Object> ObjectTemplate::NewInstance(Local<Context> context) {
+ PREPARE_FOR_EXECUTION(context, "v8::ObjectTemplate::NewInstance()", Object);
+ auto self = Utils::OpenHandle(this);
+ Local<Object> result;
+ has_pending_exception =
+ !ToLocal<Object>(i::ApiNatives::InstantiateObject(self), &result);
+ RETURN_ON_FAILED_EXECUTION(Object);
+ RETURN_ESCAPED(result);
+}
+
+
Local<v8::Object> ObjectTemplate::NewInstance() {
- i::Handle<i::ObjectTemplateInfo> info = Utils::OpenHandle(this);
- i::Isolate* isolate = info->GetIsolate();
- ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()",
- return Local<v8::Object>());
- LOG_API(isolate, "ObjectTemplate::NewInstance");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj;
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(NewInstance(context), Object);
+}
+
+
+MaybeLocal<v8::Function> FunctionTemplate::GetFunction(Local<Context> context) {
+ PREPARE_FOR_EXECUTION(context, "v8::FunctionTemplate::GetFunction()",
+ Function);
+ auto self = Utils::OpenHandle(this);
+ Local<Function> result;
has_pending_exception =
- !i::ApiNatives::InstantiateObject(info).ToHandle(&obj);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Object>());
- return Utils::ToLocal(i::Handle<i::JSObject>::cast(obj));
+ !ToLocal<Function>(i::ApiNatives::InstantiateFunction(self), &result);
+ RETURN_ON_FAILED_EXECUTION(Function);
+ RETURN_ESCAPED(result);
}
Local<v8::Function> FunctionTemplate::GetFunction() {
- i::Handle<i::FunctionTemplateInfo> info = Utils::OpenHandle(this);
- i::Isolate* isolate = info->GetIsolate();
- ON_BAILOUT(isolate, "v8::FunctionTemplate::GetFunction()",
- return Local<v8::Function>());
- LOG_API(isolate, "FunctionTemplate::GetFunction");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> obj;
- has_pending_exception =
- !i::ApiNatives::InstantiateFunction(info).ToHandle(&obj);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::Function>());
- return Utils::ToLocal(i::Handle<i::JSFunction>::cast(obj));
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(GetFunction(context), Function);
}
bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
- i::Handle<i::FunctionTemplateInfo> info = Utils::OpenHandle(this);
- i::Isolate* isolate = info->GetIsolate();
- ON_BAILOUT(isolate, "v8::FunctionTemplate::HasInstanceOf()", return false);
- i::Object* obj = *Utils::OpenHandle(*value);
- return info->IsTemplateFor(obj);
+ auto self = Utils::OpenHandle(this);
+ auto obj = Utils::OpenHandle(*value);
+ return self->IsTemplateFor(*obj);
}
@@ -5456,9 +5724,9 @@ inline int StringLength(const uint16_t* string) {
MUST_USE_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
- String::NewStringType type,
+ v8::NewStringType type,
i::Vector<const char> string) {
- if (type == String::kInternalizedString) {
+ if (type == v8::NewStringType::kInternalized) {
return factory->InternalizeUtf8String(string);
}
return factory->NewStringFromUtf8(string);
@@ -5467,9 +5735,9 @@ inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
MUST_USE_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
- String::NewStringType type,
+ v8::NewStringType type,
i::Vector<const uint8_t> string) {
- if (type == String::kInternalizedString) {
+ if (type == v8::NewStringType::kInternalized) {
return factory->InternalizeOneByteString(string);
}
return factory->NewStringFromOneByte(string);
@@ -5478,36 +5746,32 @@ inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
MUST_USE_RESULT
inline i::MaybeHandle<i::String> NewString(i::Factory* factory,
- String::NewStringType type,
+ v8::NewStringType type,
i::Vector<const uint16_t> string) {
- if (type == String::kInternalizedString) {
+ if (type == v8::NewStringType::kInternalized) {
return factory->InternalizeTwoByteString(string);
}
return factory->NewStringFromTwoByte(string);
}
-template<typename Char>
-inline Local<String> NewString(Isolate* v8_isolate,
- const char* location,
- const char* env,
- const Char* data,
- String::NewStringType type,
- int length) {
+STATIC_ASSERT(v8::String::kMaxLength == i::String::kMaxLength);
+
+
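+// NewString now rejects over-long input up front: anything longer than
+// i::String::kMaxLength yields an empty MaybeLocal<String> instead of
+// reaching the factory (see the TODO about a context-free exception).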
+template <typename Char>
+inline MaybeLocal<String> NewString(Isolate* v8_isolate, const char* location,
+ const char* env, const Char* data,
+ v8::NewStringType type, int length) {
i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate);
- ON_BAILOUT(isolate, location, return Local<String>());
- LOG_API(isolate, env);
- if (length == 0) {
- return String::Empty(v8_isolate);
- }
+ if (length == 0) return String::Empty(v8_isolate);
+ // TODO(dcarney): throw a context free exception.
+ if (length > i::String::kMaxLength) return MaybeLocal<String>();
ENTER_V8(isolate);
- if (length == -1) length = StringLength(data);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::String> result;
- has_pending_exception =
- !NewString(isolate->factory(), type, i::Vector<const Char>(data, length))
- .ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<String>());
+ LOG_API(isolate, env);
+ if (length < 0) length = StringLength(data);
+ i::Handle<i::String> result =
+ NewString(isolate->factory(), type, i::Vector<const Char>(data, length))
+ .ToHandleChecked();
return Utils::ToLocal(result);
}
@@ -5518,12 +5782,17 @@ Local<String> String::NewFromUtf8(Isolate* isolate,
const char* data,
NewStringType type,
int length) {
- return NewString(isolate,
- "v8::String::NewFromUtf8()",
- "String::NewFromUtf8",
- data,
- type,
- length);
+ RETURN_TO_LOCAL_UNCHECKED(
+ NewString(isolate, "v8::String::NewFromUtf8()", "String::NewFromUtf8",
+ data, static_cast<v8::NewStringType>(type), length),
+ String);
+}
+
+
+MaybeLocal<String> String::NewFromUtf8(Isolate* isolate, const char* data,
+ v8::NewStringType type, int length) {
+ return NewString(isolate, "v8::String::NewFromUtf8()", "String::NewFromUtf8",
+ data, type, length);
}
@@ -5531,12 +5800,18 @@ Local<String> String::NewFromOneByte(Isolate* isolate,
const uint8_t* data,
NewStringType type,
int length) {
- return NewString(isolate,
- "v8::String::NewFromOneByte()",
- "String::NewFromOneByte",
- data,
- type,
- length);
+ RETURN_TO_LOCAL_UNCHECKED(
+ NewString(isolate, "v8::String::NewFromOneByte()",
+ "String::NewFromOneByte", data,
+ static_cast<v8::NewStringType>(type), length),
+ String);
+}
+
+
+MaybeLocal<String> String::NewFromOneByte(Isolate* isolate, const uint8_t* data,
+ v8::NewStringType type, int length) {
+ return NewString(isolate, "v8::String::NewFromOneByte()",
+ "String::NewFromOneByte", data, type, length);
}
@@ -5544,20 +5819,27 @@ Local<String> String::NewFromTwoByte(Isolate* isolate,
const uint16_t* data,
NewStringType type,
int length) {
- return NewString(isolate,
- "v8::String::NewFromTwoByte()",
- "String::NewFromTwoByte",
- data,
- type,
- length);
+ RETURN_TO_LOCAL_UNCHECKED(
+ NewString(isolate, "v8::String::NewFromTwoByte()",
+ "String::NewFromTwoByte", data,
+ static_cast<v8::NewStringType>(type), length),
+ String);
+}
+
+
+MaybeLocal<String> String::NewFromTwoByte(Isolate* isolate,
+ const uint16_t* data,
+ v8::NewStringType type, int length) {
+ return NewString(isolate, "v8::String::NewFromTwoByte()",
+ "String::NewFromTwoByte", data, type, length);
}
Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
i::Isolate* isolate = left_string->GetIsolate();
- LOG_API(isolate, "String::New(char)");
ENTER_V8(isolate);
+ LOG_API(isolate, "v8::String::Concat");
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
// If we are steering towards a range error, do not wait for the error to be
// thrown, and return the null handle instead.
@@ -5570,35 +5852,54 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
}
-static i::MaybeHandle<i::String> NewExternalStringHandle(
- i::Isolate* isolate, v8::String::ExternalStringResource* resource) {
- return isolate->factory()->NewExternalStringFromTwoByte(resource);
+MaybeLocal<String> v8::String::NewExternalTwoByte(
+ Isolate* isolate, v8::String::ExternalStringResource* resource) {
+ CHECK(resource && resource->data());
+ // TODO(dcarney): throw a context free exception.
+ if (resource->length() > static_cast<size_t>(i::String::kMaxLength)) {
+ return MaybeLocal<String>();
+ }
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ ENTER_V8(i_isolate);
+ LOG_API(i_isolate, "String::NewExternalTwoByte");
+ i::Handle<i::String> string = i_isolate->factory()
+ ->NewExternalStringFromTwoByte(resource)
+ .ToHandleChecked();
+ i_isolate->heap()->external_string_table()->AddString(*string);
+ return Utils::ToLocal(string);
}
-static i::MaybeHandle<i::String> NewExternalOneByteStringHandle(
- i::Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
- return isolate->factory()->NewExternalStringFromOneByte(resource);
+Local<String> v8::String::NewExternal(
+ Isolate* isolate, v8::String::ExternalStringResource* resource) {
+ RETURN_TO_LOCAL_UNCHECKED(NewExternalTwoByte(isolate, resource), String);
}
-Local<String> v8::String::NewExternal(
- Isolate* isolate,
- v8::String::ExternalStringResource* resource) {
+MaybeLocal<String> v8::String::NewExternalOneByte(
+ Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
+ CHECK(resource && resource->data());
+ // TODO(dcarney): throw a context free exception.
+ if (resource->length() > static_cast<size_t>(i::String::kMaxLength)) {
+ return MaybeLocal<String>();
+ }
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "String::NewExternal");
ENTER_V8(i_isolate);
- CHECK(resource && resource->data());
- EXCEPTION_PREAMBLE(i_isolate);
- i::Handle<i::String> string;
- has_pending_exception =
- !NewExternalStringHandle(i_isolate, resource).ToHandle(&string);
- EXCEPTION_BAILOUT_CHECK(i_isolate, Local<String>());
+ LOG_API(i_isolate, "String::NewExternalOneByte");
+ i::Handle<i::String> string = i_isolate->factory()
+ ->NewExternalStringFromOneByte(resource)
+ .ToHandleChecked();
i_isolate->heap()->external_string_table()->AddString(*string);
return Utils::ToLocal(string);
}
+Local<String> v8::String::NewExternal(
+ Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
+ RETURN_TO_LOCAL_UNCHECKED(NewExternalOneByte(isolate, resource), String);
+}
+
+
bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
@@ -5625,22 +5926,6 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
}
-Local<String> v8::String::NewExternal(
- Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "String::NewExternal");
- ENTER_V8(i_isolate);
- CHECK(resource && resource->data());
- EXCEPTION_PREAMBLE(i_isolate);
- i::Handle<i::String> string;
- has_pending_exception =
- !NewExternalOneByteStringHandle(i_isolate, resource).ToHandle(&string);
- EXCEPTION_BAILOUT_CHECK(i_isolate, Local<String>());
- i_isolate->heap()->external_string_table()->AddString(*string);
- return Utils::ToLocal(string);
-}
-
-
bool v8::String::MakeExternal(
v8::String::ExternalOneByteStringResource* resource) {
i::Handle<i::String> obj = Utils::OpenHandle(this);
@@ -5780,20 +6065,23 @@ Local<v8::Symbol> v8::SymbolObject::ValueOf() const {
}
-Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- LOG_API(i_isolate, "Date::New");
+MaybeLocal<v8::Value> v8::Date::New(Local<Context> context, double time) {
if (std::isnan(time)) {
// Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
time = std::numeric_limits<double>::quiet_NaN();
}
- ENTER_V8(i_isolate);
- EXCEPTION_PREAMBLE(i_isolate);
- i::Handle<i::Object> obj;
- has_pending_exception = !i::Execution::NewDate(
- i_isolate, time).ToHandle(&obj);
- EXCEPTION_BAILOUT_CHECK(i_isolate, Local<v8::Value>());
- return Utils::ToLocal(obj);
+ PREPARE_FOR_EXECUTION(context, "Date::New", Value);
+ Local<Value> result;
+ has_pending_exception =
+ !ToLocal<Value>(i::Execution::NewDate(isolate, time), &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
+ auto context = isolate->GetCurrentContext();
+ RETURN_TO_LOCAL_UNCHECKED(New(context, time), Value);
}
@@ -5808,13 +6096,9 @@ double v8::Date::ValueOf() const {
void v8::Date::DateTimeConfigurationChangeNotification(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ON_BAILOUT(i_isolate, "v8::Date::DateTimeConfigurationChangeNotification()",
- return);
LOG_API(i_isolate, "Date::DateTimeConfigurationChangeNotification");
ENTER_V8(i_isolate);
-
i_isolate->date_cache()->ResetDateCache();
-
if (!i_isolate->eternal_handles()->Exists(
i::EternalHandles::DATE_CACHE_VERSION)) {
return;
@@ -5843,18 +6127,24 @@ static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) {
}
-Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
- Flags flags) {
- i::Isolate* isolate = Utils::OpenHandle(*pattern)->GetIsolate();
- LOG_API(isolate, "RegExp::New");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::JSRegExp> obj;
- has_pending_exception = !i::Execution::NewJSRegExp(
- Utils::OpenHandle(*pattern),
- RegExpFlagsToString(flags)).ToHandle(&obj);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<v8::RegExp>());
- return Utils::ToLocal(i::Handle<i::JSRegExp>::cast(obj));
+MaybeLocal<v8::RegExp> v8::RegExp::New(Local<Context> context,
+ Handle<String> pattern, Flags flags) {
+ PREPARE_FOR_EXECUTION(context, "RegExp::New", RegExp);
+ Local<v8::RegExp> result;
+ has_pending_exception =
+ !ToLocal<RegExp>(i::Execution::NewJSRegExp(Utils::OpenHandle(*pattern),
+ RegExpFlagsToString(flags)),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(RegExp);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern, Flags flags) {
+ auto isolate =
+ reinterpret_cast<Isolate*>(Utils::OpenHandle(*pattern)->GetIsolate());
+ auto context = isolate->GetCurrentContext();
+ RETURN_TO_LOCAL_UNCHECKED(New(context, pattern, flags), RegExp);
}
@@ -5904,55 +6194,46 @@ uint32_t v8::Array::Length() const {
}
-Local<Object> Array::CloneElementAt(uint32_t index) {
- i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ON_BAILOUT(isolate, "v8::Array::CloneElementAt()", return Local<Object>());
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- if (!self->HasFastObjectElements()) {
- return Local<Object>();
- }
+MaybeLocal<Object> Array::CloneElementAt(Local<Context> context,
+ uint32_t index) {
+ PREPARE_FOR_EXECUTION(context, "v8::Array::CloneElementAt()", Object);
+ auto self = Utils::OpenHandle(this);
+ if (!self->HasFastObjectElements()) return Local<Object>();
i::FixedArray* elms = i::FixedArray::cast(self->elements());
i::Object* paragon = elms->get(index);
- if (!paragon->IsJSObject()) {
- return Local<Object>();
- }
+ if (!paragon->IsJSObject()) return Local<Object>();
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
- EXCEPTION_PREAMBLE(isolate);
- ENTER_V8(isolate);
- i::Handle<i::JSObject> result =
- isolate->factory()->CopyJSObject(paragon_handle);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Object>());
- return Utils::ToLocal(result);
+ Local<Object> result;
+ has_pending_exception = !ToLocal<Object>(
+ isolate->factory()->CopyJSObject(paragon_handle), &result);
+ RETURN_ON_FAILED_EXECUTION(Object);
+ RETURN_ESCAPED(result);
}
-bool Value::IsPromise() const {
- i::Handle<i::Object> val = Utils::OpenHandle(this);
- if (!val->IsJSObject()) return false;
- i::Handle<i::JSObject> obj = i::Handle<i::JSObject>::cast(val);
- i::Isolate* isolate = obj->GetIsolate();
- LOG_API(isolate, "IsPromise");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> argv[] = { obj };
- i::Handle<i::Object> b;
- has_pending_exception = !i::Execution::Call(
- isolate,
- isolate->is_promise(),
- isolate->factory()->undefined_value(),
- arraysize(argv), argv,
- false).ToHandle(&b);
- EXCEPTION_BAILOUT_CHECK(isolate, false);
- return b->BooleanValue();
+Local<Object> Array::CloneElementAt(uint32_t index) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(CloneElementAt(context, index), Object);
}
-Local<Promise::Resolver> Promise::Resolver::New(Isolate* v8_isolate) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- LOG_API(isolate, "Promise::Resolver::New");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
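+// IsPromise() no longer calls back into JS: it tests for the internal
+// promise_status property directly and bails out early on objects that
+// require access checks.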
+bool Value::IsPromise() const {
+ auto self = Utils::OpenHandle(this);
+ if (!self->IsJSObject()) return false;
+ auto js_object = i::Handle<i::JSObject>::cast(self);
+ // Promises can't have access checks.
+ if (js_object->map()->is_access_check_needed()) return false;
+ auto isolate = js_object->GetIsolate();
+ // TODO(dcarney): this should just be read from the symbol registry so as not
+ // to be context dependent.
+ auto key = isolate->promise_status();
+ // Shouldn't be possible to throw here.
+ return i::JSObject::HasRealNamedProperty(js_object, key).FromJust();
+}
+
+
+MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
+ PREPARE_FOR_EXECUTION(context, "Promise::Resolver::New", Resolver);
i::Handle<i::Object> result;
has_pending_exception = !i::Execution::Call(
isolate,
@@ -5960,8 +6241,14 @@ Local<Promise::Resolver> Promise::Resolver::New(Isolate* v8_isolate) {
isolate->factory()->undefined_value(),
0, NULL,
false).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise::Resolver>());
- return Local<Promise::Resolver>::Cast(Utils::ToLocal(result));
+ RETURN_ON_FAILED_EXECUTION(Promise::Resolver);
+ RETURN_ESCAPED(Local<Promise::Resolver>::Cast(Utils::ToLocal(result)));
+}
+
+
+Local<Promise::Resolver> Promise::Resolver::New(Isolate* isolate) {
+ RETURN_TO_LOCAL_UNCHECKED(New(isolate->GetCurrentContext()),
+ Promise::Resolver);
}
@@ -5971,94 +6258,107 @@ Local<Promise> Promise::Resolver::GetPromise() {
}
-void Promise::Resolver::Resolve(Handle<Value> value) {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
- i::Isolate* isolate = promise->GetIsolate();
- LOG_API(isolate, "Promise::Resolver::Resolve");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
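+// Resolver::Resolve/Reject now report success as Just(true) and a thrown
+// exception as Nothing<bool>(); the void overloads below drop the result.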
+Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
+ Handle<Value> value) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
has_pending_exception = i::Execution::Call(
isolate,
isolate->promise_resolve(),
isolate->factory()->undefined_value(),
arraysize(argv), argv,
false).is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
}
-void Promise::Resolver::Reject(Handle<Value> value) {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
- i::Isolate* isolate = promise->GetIsolate();
- LOG_API(isolate, "Promise::Resolver::Reject");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
+void Promise::Resolver::Resolve(Handle<Value> value) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ Resolve(context, value);
+}
+
+
+Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
+ Handle<Value> value) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, "Promise::Resolver::Resolve", bool);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
has_pending_exception = i::Execution::Call(
isolate,
isolate->promise_reject(),
isolate->factory()->undefined_value(),
arraysize(argv), argv,
false).is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(true);
+}
+
+
+void Promise::Resolver::Reject(Handle<Value> value) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ Reject(context, value);
+}
+
+
+MaybeLocal<Promise> Promise::Chain(Local<Context> context,
+ Handle<Function> handler) {
+ PREPARE_FOR_EXECUTION(context, "Promise::Chain", Promise);
+ auto self = Utils::OpenHandle(this);
+ i::Handle<i::Object> argv[] = {Utils::OpenHandle(*handler)};
+ i::Handle<i::Object> result;
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->promise_chain(), self,
+ arraysize(argv), argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Promise);
+ RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
Local<Promise> Promise::Chain(Handle<Function> handler) {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
- i::Isolate* isolate = promise->GetIsolate();
- LOG_API(isolate, "Promise::Chain");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(Chain(context, handler), Promise);
+}
+
+
+MaybeLocal<Promise> Promise::Catch(Local<Context> context,
+ Handle<Function> handler) {
+ PREPARE_FOR_EXECUTION(context, "Promise::Catch", Promise);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(
- isolate,
- isolate->promise_chain(),
- promise,
- arraysize(argv), argv,
- false).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
- return Local<Promise>::Cast(Utils::ToLocal(result));
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->promise_catch(), self,
+ arraysize(argv), argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Promise);
+ RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
Local<Promise> Promise::Catch(Handle<Function> handler) {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
- i::Isolate* isolate = promise->GetIsolate();
- LOG_API(isolate, "Promise::Catch");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(Catch(context, handler), Promise);
+}
+
+
+MaybeLocal<Promise> Promise::Then(Local<Context> context,
+ Handle<Function> handler) {
+ PREPARE_FOR_EXECUTION(context, "Promise::Then", Promise);
+ auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(
- isolate,
- isolate->promise_catch(),
- promise,
- arraysize(argv), argv,
- false).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
- return Local<Promise>::Cast(Utils::ToLocal(result));
+ has_pending_exception =
+ !i::Execution::Call(isolate, isolate->promise_then(), self,
+ arraysize(argv), argv, false).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION(Promise);
+ RETURN_ESCAPED(Local<Promise>::Cast(Utils::ToLocal(result)));
}
Local<Promise> Promise::Then(Handle<Function> handler) {
- i::Handle<i::JSObject> promise = Utils::OpenHandle(this);
- i::Isolate* isolate = promise->GetIsolate();
- LOG_API(isolate, "Promise::Then");
- ENTER_V8(isolate);
- EXCEPTION_PREAMBLE(isolate);
- i::Handle<i::Object> argv[] = { Utils::OpenHandle(*handler) };
- i::Handle<i::Object> result;
- has_pending_exception = !i::Execution::Call(
- isolate,
- isolate->promise_then(),
- promise,
- arraysize(argv), argv,
- false).ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
- return Local<Promise>::Cast(Utils::ToLocal(result));
+ auto context = ContextFromHeapObject(Utils::OpenHandle(this));
+ RETURN_TO_LOCAL_UNCHECKED(Then(context, handler), Promise);
}
@@ -6083,14 +6383,19 @@ bool v8::ArrayBuffer::IsNeuterable() const {
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
- i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
- Utils::ApiCheck(!obj->is_external(),
- "v8::ArrayBuffer::Externalize",
+ i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
+ Utils::ApiCheck(!self->is_external(), "v8::ArrayBuffer::Externalize",
"ArrayBuffer already externalized");
- obj->set_is_external(true);
- size_t byte_length = static_cast<size_t>(obj->byte_length()->Number());
+ self->set_is_external(true);
+ return GetContents();
+}
+
+
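+// GetContents() exposes the backing store without externalizing the buffer;
+// Externalize() now just flips the external flag and delegates here.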
+v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
+ i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
+ size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
Contents contents;
- contents.data_ = obj->backing_store();
+ contents.data_ = self->backing_store();
contents.byte_length_ = byte_length;
return contents;
}
@@ -6128,13 +6433,16 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
- size_t byte_length) {
+ size_t byte_length,
+ ArrayBufferCreationMode mode) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
ENTER_V8(i_isolate);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer();
- i::Runtime::SetupArrayBuffer(i_isolate, obj, true, data, byte_length);
+ i::Runtime::SetupArrayBuffer(i_isolate, obj,
+ mode == ArrayBufferCreationMode::kExternalized,
+ data, byte_length);
return Utils::ToLocal(obj);
}
@@ -6154,6 +6462,48 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
}
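+// CopyContents() copies at most byte_length bytes out of the view, handling
+// both JSDataView and JSTypedArray, including on-heap typed arrays whose
+// data still lives in a FixedTypedArrayBase rather than in a JSArrayBuffer.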
+size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
+ i::Isolate* isolate = obj->GetIsolate();
+ size_t byte_offset = i::NumberToSize(isolate, obj->byte_offset());
+ size_t bytes_to_copy =
+ i::Min(byte_length, i::NumberToSize(isolate, obj->byte_length()));
+ if (bytes_to_copy) {
+ i::DisallowHeapAllocation no_gc;
+ const char* source = nullptr;
+ if (obj->IsJSDataView()) {
+ i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj));
+ i::Handle<i::JSArrayBuffer> buffer(
+ i::JSArrayBuffer::cast(data_view->buffer()));
+ source = reinterpret_cast<char*>(buffer->backing_store());
+ } else {
+ DCHECK(obj->IsJSTypedArray());
+ i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*obj));
+ if (typed_array->buffer()->IsSmi()) {
+ i::Handle<i::FixedTypedArrayBase> fixed_array(
+ i::FixedTypedArrayBase::cast(typed_array->elements()));
+ source = reinterpret_cast<char*>(fixed_array->DataPtr());
+ } else {
+ i::Handle<i::JSArrayBuffer> buffer(
+ i::JSArrayBuffer::cast(typed_array->buffer()));
+ source = reinterpret_cast<char*>(buffer->backing_store());
+ }
+ }
+ memcpy(dest, source + byte_offset, bytes_to_copy);
+ }
+ return bytes_to_copy;
+}
+
+
+bool v8::ArrayBufferView::HasBuffer() const {
+ i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
+ if (obj->IsJSDataView()) return true;
+ DCHECK(obj->IsJSTypedArray());
+ i::Handle<i::JSTypedArray> typed_array(i::JSTypedArray::cast(*obj));
+ return !typed_array->buffer()->IsSmi();
+}
+
+
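
CopyContents() reads through either backing representation: a DataView always has a JSArrayBuffer, while a typed array whose buffer() is still a Smi keeps its elements in a FixedTypedArrayBase on the V8 heap, which is the same condition HasBuffer() reports. Sketch, not part of the patch; `view` is a hypothetical Local<ArrayBufferView>:

    uint8_t scratch[64];
    size_t copied = view->CopyContents(scratch, sizeof(scratch));
    // HasBuffer() is false while the elements still live on the V8 heap;
    // CopyContents() works in both states and returns the bytes copied.
    bool materialized = view->HasBuffer();
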
size_t v8::ArrayBufferView::ByteOffset() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_offset()->Number());
@@ -6518,10 +6868,6 @@ void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
}
-void Isolate::ClearInterrupt() {
-}
-
-
void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
CHECK(i::FLAG_expose_gc);
if (type == kMinorGarbageCollection) {
@@ -6544,8 +6890,13 @@ Isolate* Isolate::GetCurrent() {
Isolate* Isolate::New(const Isolate::CreateParams& params) {
- i::Isolate* isolate = new i::Isolate(params.enable_serializer);
+ i::Isolate* isolate = new i::Isolate(false);
Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
+ if (params.snapshot_blob != NULL) {
+ isolate->set_snapshot_blob(params.snapshot_blob);
+ } else {
+ isolate->set_snapshot_blob(i::Snapshot::DefaultSnapshotBlob());
+ }
if (params.entry_hook) {
isolate->set_function_entry_hook(params.entry_hook);
}
@@ -6554,12 +6905,30 @@ Isolate* Isolate::New(const Isolate::CreateParams& params) {
isolate->logger()->SetCodeEventHandler(kJitCodeEventDefault,
params.code_event_handler);
}
+ if (params.counter_lookup_callback) {
+ v8_isolate->SetCounterFunction(params.counter_lookup_callback);
+ }
+
+ if (params.create_histogram_callback) {
+ v8_isolate->SetCreateHistogramFunction(params.create_histogram_callback);
+ }
+
+ if (params.add_histogram_sample_callback) {
+ v8_isolate->SetAddHistogramSampleFunction(
+ params.add_histogram_sample_callback);
+ }
SetResourceConstraints(isolate, params.constraints);
// TODO(jochen): Once we get rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
if (params.entry_hook || !i::Snapshot::Initialize(isolate)) {
// If the isolate has a function entry hook, it needs to rebuild all its
// code stubs with entry hooks embedded, so don't deserialize a snapshot.
+ if (i::Snapshot::EmbedsScript(isolate)) {
+      // If the snapshot embeds a script, we cannot fall back to
+      // initializing the isolate without the snapshot. This is unlikely
+      // to happen in practice, though.
+ V8_Fatal(__FILE__, __LINE__,
+ "Initializing isolate from custom startup snapshot failed");
+ }
isolate->Init(NULL);
}
return v8_isolate;
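
Isolate::New() now honours a caller-supplied startup snapshot (falling back to the default blob) and wires up the three counter/histogram callbacks from CreateParams. A sketch of the new fields, not part of the patch; `MyCounterLookup` and `my_blob` are hypothetical embedder helpers:

    v8::Isolate::CreateParams params;
    params.snapshot_blob = &my_blob;       // omitted => default blob
    params.counter_lookup_callback = MyCounterLookup;
    v8::Isolate* isolate = v8::Isolate::New(params);
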
@@ -6835,7 +7204,6 @@ bool Isolate::IsDead() {
bool Isolate::AddMessageListener(MessageCallback that, Handle<Value> data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
NeanderArray listeners(isolate->factory()->message_listeners());
@@ -6850,7 +7218,6 @@ bool Isolate::AddMessageListener(MessageCallback that, Handle<Value> data) {
void Isolate::RemoveMessageListeners(MessageCallback that) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
NeanderArray listeners(isolate->factory()->message_listeners());
@@ -6968,21 +7335,12 @@ String::Value::~Value() {
Local<Value> Exception::NAME(v8::Handle<v8::String> raw_message) { \
i::Isolate* isolate = i::Isolate::Current(); \
LOG_API(isolate, #NAME); \
- ON_BAILOUT(isolate, "v8::Exception::" #NAME "()", return Local<Value>()); \
ENTER_V8(isolate); \
i::Object* error; \
{ \
i::HandleScope scope(isolate); \
i::Handle<i::String> message = Utils::OpenHandle(*raw_message); \
- i::Handle<i::Object> result; \
- EXCEPTION_PREAMBLE(isolate); \
- i::MaybeHandle<i::Object> maybe_result = \
- isolate->factory()->New##NAME(message); \
- has_pending_exception = !maybe_result.ToHandle(&result); \
- /* TODO(yangguo): crbug/403509. Return empty handle instead. */ \
- EXCEPTION_BAILOUT_CHECK( \
- isolate, v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate))); \
- error = *result; \
+ error = *isolate->factory()->New##NAME(message); \
} \
i::Handle<i::Object> result(error, isolate); \
return Utils::ToLocal(result); \
@@ -7022,7 +7380,6 @@ Local<StackTrace> Exception::GetStackTrace(Handle<Value> exception) {
bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Object> foreign = isolate->factory()->undefined_value();
@@ -7075,52 +7432,54 @@ void Debug::SendCommand(Isolate* isolate,
}
-Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
- v8::Handle<v8::Value> data) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Debug::Call()", return Local<Value>());
- ENTER_V8(isolate);
- i::MaybeHandle<i::Object> maybe_result;
- EXCEPTION_PREAMBLE(isolate);
+MaybeLocal<Value> Debug::Call(Local<Context> context,
+ v8::Handle<v8::Function> fun,
+ v8::Handle<v8::Value> data) {
+ PREPARE_FOR_EXECUTION(context, "v8::Debug::Call()", Value);
+ i::Handle<i::Object> data_obj;
if (data.IsEmpty()) {
- maybe_result = isolate->debug()->Call(
- Utils::OpenHandle(*fun), isolate->factory()->undefined_value());
+ data_obj = isolate->factory()->undefined_value();
} else {
- maybe_result = isolate->debug()->Call(
- Utils::OpenHandle(*fun), Utils::OpenHandle(*data));
+ data_obj = Utils::OpenHandle(*data);
}
- i::Handle<i::Object> result;
- has_pending_exception = !maybe_result.ToHandle(&result);
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return Utils::ToLocal(result);
+ Local<Value> result;
+ has_pending_exception =
+ !ToLocal<Value>(isolate->debug()->Call(Utils::OpenHandle(*fun), data_obj),
+ &result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
}
-Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
- i::Isolate* isolate = i::Isolate::Current();
- ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>());
- ENTER_V8(isolate);
- v8::EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
+Local<Value> Debug::Call(v8::Handle<v8::Function> fun,
+ v8::Handle<v8::Value> data) {
+ auto context = ContextFromHeapObject(Utils::OpenHandle(*fun));
+ RETURN_TO_LOCAL_UNCHECKED(Call(context, fun, data), Value);
+}
+
+
+MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
+ v8::Handle<v8::Value> obj) {
+ PREPARE_FOR_EXECUTION(context, "v8::Debug::GetMirror()", Value);
i::Debug* isolate_debug = isolate->debug();
- EXCEPTION_PREAMBLE(isolate);
has_pending_exception = !isolate_debug->Load();
- v8::Local<v8::Value> result;
- if (!has_pending_exception) {
- i::Handle<i::JSObject> debug(
- isolate_debug->debug_context()->global_object());
- i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("MakeMirror"));
- i::Handle<i::Object> fun_obj =
- i::Object::GetProperty(debug, name).ToHandleChecked();
- i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
- v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun);
- const int kArgc = 1;
- v8::Handle<v8::Value> argv[kArgc] = { obj };
- result = v8_fun->Call(Utils::ToLocal(debug), kArgc, argv);
- has_pending_exception = result.IsEmpty();
- }
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
- return scope.Escape(result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
+ auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
+ auto fun_obj = i::Object::GetProperty(debug, name).ToHandleChecked();
+ auto v8_fun = Utils::ToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
+ const int kArgc = 1;
+ v8::Handle<v8::Value> argv[kArgc] = {obj};
+ Local<Value> result;
+ has_pending_exception = !v8_fun->Call(context, Utils::ToLocal(debug), kArgc,
+ argv).ToLocal(&result);
+ RETURN_ON_FAILED_EXECUTION(Value);
+ RETURN_ESCAPED(result);
+}
+
+
+Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
+ RETURN_TO_LOCAL_UNCHECKED(GetMirror(Local<Context>(), obj), Value);
}
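
Debug::Call() and Debug::GetMirror() follow the same migration as Promise::Then above: the context-taking MaybeLocal overload does the work, and the old signature survives as an unchecked wrapper. Sketch, not part of the patch; `fun` and `data` are hypothetical handles:

    v8::Local<v8::Value> out;
    if (v8::Debug::Call(context, fun, data).ToLocal(&out)) {
      // `out` holds the result; on failure an exception is pending.
    }
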
@@ -7211,7 +7570,7 @@ unsigned CpuProfileNode::GetHitCount() const {
unsigned CpuProfileNode::GetCallUid() const {
- return reinterpret_cast<const i::ProfileNode*>(this)->entry()->GetCallUid();
+ return reinterpret_cast<const i::ProfileNode*>(this)->function_id();
}
@@ -7297,11 +7656,6 @@ void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) {
}
-void CpuProfiler::StartCpuProfiling(Handle<String> title, bool record_samples) {
- StartProfiling(title, record_samples);
-}
-
-
CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
return reinterpret_cast<CpuProfile*>(
reinterpret_cast<i::CpuProfiler*>(this)->StopProfiling(
@@ -7309,11 +7663,6 @@ CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
}
-const CpuProfile* CpuProfiler::StopCpuProfiling(Handle<String> title) {
- return StopProfiling(title);
-}
-
-
void CpuProfiler::SetIdle(bool is_idle) {
i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
v8::StateTag state = isolate->current_vm_state();
@@ -7394,13 +7743,6 @@ SnapshotObjectId HeapGraphNode::GetId() const {
}
-int HeapGraphNode::GetSelfSize() const {
- size_t size = ToInternal(this)->self_size();
- CHECK(size <= static_cast<size_t>(internal::kMaxInt));
- return static_cast<int>(size);
-}
-
-
size_t HeapGraphNode::GetShallowSize() const {
return ToInternal(this)->self_size();
}
@@ -7434,18 +7776,6 @@ void HeapSnapshot::Delete() {
}
-unsigned HeapSnapshot::GetUid() const {
- return ToInternal(this)->uid();
-}
-
-
-Handle<String> HeapSnapshot::GetTitle() const {
- i::Isolate* isolate = i::Isolate::Current();
- return ToApiHandle<String>(
- isolate->factory()->InternalizeUtf8String(ToInternal(this)->title()));
-}
-
-
const HeapGraphNode* HeapSnapshot::GetRoot() const {
return reinterpret_cast<const HeapGraphNode*>(ToInternal(this)->root());
}
@@ -7522,12 +7852,10 @@ void HeapProfiler::ClearObjectIds() {
const HeapSnapshot* HeapProfiler::TakeHeapSnapshot(
- Handle<String> title,
- ActivityControl* control,
- ObjectNameResolver* resolver) {
+ ActivityControl* control, ObjectNameResolver* resolver) {
return reinterpret_cast<const HeapSnapshot*>(
- reinterpret_cast<i::HeapProfiler*>(this)->TakeSnapshot(
- *Utils::OpenHandle(*title), control, resolver));
+ reinterpret_cast<i::HeapProfiler*>(this)
+ ->TakeSnapshot(control, resolver));
}
@@ -7542,8 +7870,10 @@ void HeapProfiler::StopTrackingHeapObjects() {
}
-SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream) {
- return reinterpret_cast<i::HeapProfiler*>(this)->PushHeapObjectsStats(stream);
+SnapshotObjectId HeapProfiler::GetHeapStats(OutputStream* stream,
+ int64_t* timestamp_us) {
+ i::HeapProfiler* heap_profiler = reinterpret_cast<i::HeapProfiler*>(this);
+ return heap_profiler->PushHeapObjectsStats(stream, timestamp_us);
}
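
Two profiler API changes land together here: TakeHeapSnapshot() drops the title argument, and GetHeapStats() can now report the timestamp of the stats batch it pushed. Sketch, not part of the patch; `my_stream` is a hypothetical v8::OutputStream implementation:

    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    const v8::HeapSnapshot* snapshot = profiler->TakeHeapSnapshot();
    int64_t timestamp_us = 0;
    v8::SnapshotObjectId last_id =
        profiler->GetHeapStats(&my_stream, &timestamp_us);
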
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index d9e3bbab9c..7fce3e3b0a 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -319,6 +319,18 @@ inline v8::Local<T> ToApiHandle(
}
+template <class T>
+inline bool ToLocal(v8::internal::MaybeHandle<v8::internal::Object> maybe,
+ Local<T>* local) {
+ v8::internal::Handle<v8::internal::Object> handle;
+ if (maybe.ToHandle(&handle)) {
+ *local = Utils::Convert<v8::internal::Object, T>(handle);
+ return true;
+ }
+ return false;
+}
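
This helper is what lets the rewritten api.cc collapse the old EXCEPTION_PREAMBLE / ToHandle / EXCEPTION_BAILOUT_CHECK dance into one expression, as in the converted Debug::Call above. The pattern, with `some_internal_maybe_handle` standing in for any i::MaybeHandle<i::Object> producer:

    Local<Value> result;
    has_pending_exception =
        !ToLocal<Value>(some_internal_maybe_handle, &result);
    RETURN_ON_FAILED_EXECUTION(Value);
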
+
+
// Implementations of ToLocal
#define MAKE_TO_LOCAL(Name, From, To) \
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 876cd3d1bd..0b5ced5159 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -121,7 +121,7 @@ Address RelocInfo::target_address_address() {
if (FLAG_enable_ool_constant_pool ||
Assembler::IsMovW(Memory::int32_at(pc_))) {
// We return the PC for ool constant pool since this function is used by the
- // serializerer and expects the address to reside within the code object.
+ // serializer and expects the address to reside within the code object.
return reinterpret_cast<Address>(pc_);
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
@@ -184,12 +184,24 @@ void RelocInfo::set_target_object(Object* target,
}
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::target_internal_reference() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return Memory::Address_at(pc_);
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -298,11 +310,14 @@ Object** RelocInfo::call_object_address() {
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) ||
- IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) ||
- IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, host_, NULL);
+ DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_));
+ if (IsInternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else {
+ Assembler::set_target_address_at(pc_, host_, NULL);
+ }
}
@@ -333,6 +348,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -358,6 +375,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
@@ -534,6 +553,12 @@ void Assembler::deserialization_set_special_target_at(
}
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ Memory::Address_at(pc) = target;
+}
+
+
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc)) ||
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 7a091d0deb..2e300da487 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -42,7 +42,6 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -246,27 +245,6 @@ bool RelocInfo::IsInConstantPool() {
}
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED();
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors
@@ -1011,7 +989,7 @@ static bool fits_shifter(uint32_t imm32,
Instr* instr) {
// imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
- uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
+ uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
if ((imm8 <= 0xff)) {
*rotate_imm = rot;
*immed_8 = imm8;
@@ -3324,7 +3302,7 @@ Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
int Assembler::DecodeShiftImm(Instr instr) {
int rotate = Instruction::RotateValue(instr) * 2;
int immed8 = Instruction::Immed8Value(instr);
- return (immed8 >> rotate) | (immed8 << (32 - rotate));
+ return base::bits::RotateRight32(immed8, rotate);
}
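
Both rotate rewrites in this file trade an open-coded expression for the base::bits helpers. Besides reading better, the helpers avoid the undefined shift-by-32 that the old form hit when the rotate count was zero. A worked check, not part of the patch:

    // ARM encodes an immediate as an 8-bit value rotated right by an even
    // count. For immed8 = 0xFF and rotate = 8:
    //   old form: (0xFF >> 8) | (0xFF << (32 - 8)) == 0xFF000000
    //   helper:   base::bits::RotateRight32(0xFF, 8) == 0xFF000000
    // For rotate == 0 the old form computed `immed8 << 32`, which is
    // undefined for a 32-bit type; RotateRight32(x, 0) simply returns x.
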
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 7d1e0bdbee..fb02740982 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -45,7 +45,7 @@
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
-#include "src/serialize.h"
+#include "src/compiler.h"
namespace v8 {
namespace internal {
@@ -794,6 +794,11 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target);
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
// Here we are patching the address in the constant pool, not the actual call
// instruction. The address in the constant pool is the same size as a
// pointer.
@@ -823,6 +828,8 @@ class Assembler : public AssemblerBase {
static const int kPcLoadDelta = 8;
static const int kJSReturnSequenceInstructions = 4;
+ static const int kJSReturnSequenceLength =
+ kJSReturnSequenceInstructions * kInstrSize;
static const int kDebugBreakSlotInstructions = 3;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -1400,7 +1407,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
+ void RecordDeoptReason(const int reason, const SourcePosition position);
// Record the emission of a constant pool.
//
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index d13d4ffa25..40531caf45 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -929,7 +929,9 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Push function as parameter to the runtime call.
__ Push(r1);
// Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+ __ LoadRoot(
+ ip, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ push(ip);
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
@@ -1334,50 +1336,99 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+ const int calleeOffset) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
+ // Make r2 the space we have left. The stack might already be overflowed
+  // here, which will cause r2 to become negative.
+ __ sub(r2, sp, r2);
+ // Check if the arguments will overflow the stack.
+ __ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
+ __ b(gt, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ ldr(r1, MemOperand(fp, calleeOffset));
+ __ Push(r1, r0);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+
+ __ bind(&okay);
+}
+
+
+static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int argumentsOffset,
+ const int indexOffset,
+ const int limitOffset) {
+ Label entry, loop;
+ __ ldr(r0, MemOperand(fp, indexOffset));
+ __ b(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // r0: current argument index
+ __ bind(&loop);
+ __ ldr(r1, MemOperand(fp, argumentsOffset));
+ __ Push(r1, r0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(r0);
+
+ // Use inline caching to access the arguments.
+ __ ldr(r0, MemOperand(fp, indexOffset));
+ __ add(r0, r0, Operand(1 << kSmiTagSize));
+ __ str(r0, MemOperand(fp, indexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ ldr(r1, MemOperand(fp, limitOffset));
+ __ cmp(r0, r1);
+ __ b(ne, &loop);
+
+ // On exit, the pushed arguments count is in r0, untagged
+ __ SmiUntag(r0);
+}
+
+
+// Used by FunctionApply and ReflectApply
+static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
+ const int kFormalParameters = targetIsArgument ? 3 : 2;
+ const int kStackSize = kFormalParameters + 1;
{
FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ const int kFunctionOffset = kReceiverOffset + kPointerSize;
__ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r0);
- __ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
+ __ ldr(r0, MemOperand(fp, kArgumentsOffset)); // get the args array
__ push(r0);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- // Make r2 the space we have left. The stack might already be overflowed
- // here which will cause r2 to become negative.
- __ sub(r2, sp, r2);
- // Check if the arguments will overflow the stack.
- __ cmp(r2, Operand::PointerOffsetFromSmiKey(r0));
- __ b(gt, &okay); // Signed comparison.
-
- // Out of stack space.
- __ ldr(r1, MemOperand(fp, kFunctionOffset));
- __ Push(r1, r0);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
+ if (targetIsArgument) {
+ __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ } else {
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ }
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current limit and index.
- __ bind(&okay);
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
__ push(r0); // limit
__ mov(r1, Operand::Zero()); // initial index
__ push(r1);
// Get the receiver.
- __ ldr(r0, MemOperand(fp, kRecvOffset));
+ __ ldr(r0, MemOperand(fp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
@@ -1434,44 +1485,19 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r0);
// Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ b(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r0: current argument index
- __ bind(&loop);
- __ ldr(r1, MemOperand(fp, kArgsOffset));
- __ Push(r1, r0);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r0);
-
- // Use inline caching to access the arguments.
- __ ldr(r0, MemOperand(fp, kIndexOffset));
- __ add(r0, r0, Operand(1 << kSmiTagSize));
- __ str(r0, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ldr(r1, MemOperand(fp, kLimitOffset));
- __ cmp(r0, r1);
- __ b(ne, &loop);
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
ParameterCount actual(r0);
- __ SmiUntag(r0);
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &call_proxy);
__ InvokeFunction(r1, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
- __ add(sp, sp, Operand(3 * kPointerSize));
+ __ add(sp, sp, Operand(kStackSize * kPointerSize));
__ Jump(lr);
// Call the function proxy.
@@ -1485,11 +1511,91 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Tear down the internal frame and remove function, receiver and args.
}
- __ add(sp, sp, Operand(3 * kPointerSize));
+ __ add(sp, sp, Operand(kStackSize * kPointerSize));
__ Jump(lr);
}
+static void Generate_ConstructHelper(MacroAssembler* masm) {
+ const int kFormalParameters = 3;
+ const int kStackSize = kFormalParameters + 1;
+
+ {
+ FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
+ const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+
+    // If newTarget is not supplied, set it to the constructor.
+ Label validate_arguments;
+ __ ldr(r0, MemOperand(fp, kNewTargetOffset));
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &validate_arguments);
+ __ ldr(r0, MemOperand(fp, kFunctionOffset));
+ __ str(r0, MemOperand(fp, kNewTargetOffset));
+
+ // Validate arguments
+ __ bind(&validate_arguments);
+ __ ldr(r0, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, kArgumentsOffset)); // get the args array
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, kNewTargetOffset)); // get the new.target
+ __ push(r0);
+ __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
+
+ // Push current limit and index.
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ __ push(r0); // limit
+ __ mov(r1, Operand::Zero()); // initial index
+ __ push(r1);
+ // Push newTarget and callee functions
+ __ ldr(r0, MemOperand(fp, kNewTargetOffset));
+ __ push(r0);
+ __ ldr(r0, MemOperand(fp, kFunctionOffset));
+ __ push(r0);
+
+ // Copy all arguments from the array to the stack.
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+
+ // Use undefined feedback vector
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ ldr(r1, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ // Leave internal frame.
+ }
+ __ add(sp, sp, Operand(kStackSize * kPointerSize));
+ __ Jump(lr);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, false);
+}
+
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, true);
+}
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ Generate_ConstructHelper(masm);
+}
+
+
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 9188b58c32..52bc197230 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -12,6 +12,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -1018,13 +1019,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r0, Heap::kExceptionRootIndex);
__ b(eq, &exception_returned);
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
-
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
__ mov(r2, Operand(pending_exception_address));
__ ldr(r2, MemOperand(r2));
__ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
@@ -1045,25 +1045,53 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
- // Retrieve the pending exception.
- __ mov(r2, Operand(pending_exception_address));
- __ ldr(r0, MemOperand(r2));
-
- // Clear the pending exception.
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
- __ str(r3, MemOperand(r2));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- Label throw_termination_exception;
- __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
- __ b(eq, &throw_termination_exception);
-
- // Handle normal exception.
- __ Throw(r0);
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set r0 to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, r0);
+ __ mov(r0, Operand(0));
+ __ mov(r1, Operand(0));
+ __ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(find_handler, 3);
+ }
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(r0);
+ // Retrieve the handler context, SP and FP.
+ __ mov(cp, Operand(pending_handler_context_address));
+ __ ldr(cp, MemOperand(cp));
+ __ mov(sp, Operand(pending_handler_sp_address));
+ __ ldr(sp, MemOperand(sp));
+ __ mov(fp, Operand(pending_handler_fp_address));
+ __ ldr(fp, MemOperand(fp));
+
+  // If the handler is a JS frame, restore the context to the frame. Note
+  // that the context (cp) will be 0 for non-JS frames.
+ __ cmp(cp, Operand(0));
+ __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
+
+ // Compute the handler entry address and jump to it.
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ __ mov(r1, Operand(pending_handler_code_address));
+ __ ldr(r1, MemOperand(r1));
+ __ mov(r2, Operand(pending_handler_offset_address));
+ __ ldr(r2, MemOperand(r2));
+ if (FLAG_enable_ool_constant_pool) {
+ __ ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));
+ }
+ __ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ add(pc, r1, r2);
}
@@ -1152,7 +1180,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
@@ -1161,11 +1189,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r0, Heap::kExceptionRootIndex);
__ b(&exit);
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
// Must preserve r0-r4, r5-r6 are available.
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bl(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
@@ -1202,7 +1229,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Call(ip);
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ bind(&exit); // r0 holds result
// Check if the current stack frame is marked as the outermost JS frame.
@@ -1479,7 +1506,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1819,8 +1846,12 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ Label skip_decrement;
+ __ b(eq, &skip_decrement);
// Subtract 1 from smi-tagged arguments count.
__ sub(r1, r1, Operand(2));
+ __ bind(&skip_decrement);
}
__ str(r1, MemOperand(sp, 0));
__ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1));
@@ -1930,7 +1961,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2200,18 +2231,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(r0, r1);
__ b(eq, &runtime);
- __ str(r1, MemOperand(r2, 0)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex);
-
- Label termination_exception;
- __ b(eq, &termination_exception);
-
- __ Throw(r0);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(r0);
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ bind(&failure);
// For failure and exception return null.
@@ -2306,7 +2327,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2883,7 +2904,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
+ MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@@ -2896,8 +2917,13 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ } else {
+ // index_ is consumed by runtime conversion function.
+ __ Push(object_, index_);
+ }
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@@ -2908,7 +2934,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r0);
- __ pop(object_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister(), object_);
+ } else {
+ __ pop(object_);
+ }
// Reload the instance type.
__ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
@@ -3221,7 +3252,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// r0: original string
@@ -3408,7 +3439,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@@ -3689,7 +3720,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ bind(&miss);
@@ -4298,15 +4329,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawLoadStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawKeyedLoadStub stub(isolate());
+ stub.GenerateForTrampoline(masm);
}
@@ -4324,6 +4355,237 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
+void VectorRawLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register feedback, Register scratch1,
+ Register scratch2, Register scratch3,
+ bool is_polymorphic, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ Register receiver_map = scratch1;
+ Register cached_map = scratch2;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&compare_map);
+ __ ldr(cached_map,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+ __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ cmp(receiver_map, cached_map);
+ __ b(ne, &start_polymorphic);
+ // found, now call handler.
+ Register handler = feedback;
+ __ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+
+ Register length = scratch3;
+ __ bind(&start_polymorphic);
+ __ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+ if (!is_polymorphic) {
+    // If the IC could be monomorphic, we have to make sure we don't go
+    // past the end of the feedback array.
+ __ cmp(length, Operand(Smi::FromInt(2)));
+ __ b(eq, miss);
+ }
+
+ Register too_far = length;
+ Register pointer_reg = feedback;
+
+ // +-----+------+------+-----+-----+ ... ----+
+ // | map | len | wm0 | h0 | wm1 | hN |
+ // +-----+------+------+-----+-----+ ... ----+
+ // 0 1 2 len-1
+ // ^ ^
+ // | |
+ // pointer_reg too_far
+ // aka feedback scratch3
+ // also need receiver_map (aka scratch1)
+ // use cached_map (scratch2) to look in the weak map values.
+ __ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
+ __ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ add(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ ldr(cached_map, MemOperand(pointer_reg));
+ __ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ cmp(receiver_map, cached_map);
+ __ b(ne, &prepare_next);
+ __ ldr(handler, MemOperand(pointer_reg, kPointerSize));
+ __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&prepare_next);
+ __ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
+ __ cmp(pointer_reg, too_far);
+ __ b(lt, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register weak_cell, Register scratch,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label compare_smi_map;
+ Register receiver_map = scratch;
+ Register cached_map = weak_cell;
+
+ // Move the weak map into the weak_cell register.
+ __ ldr(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(cached_map, receiver_map);
+ __ b(ne, miss);
+
+ Register handler = weak_cell;
+ __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
+ __ ldr(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ __ bind(&compare_smi_map);
+ __ CompareRoot(weak_cell, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, miss);
+ __ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
+ __ ldr(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+
+
+void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
+ Register name = VectorLoadICDescriptor::NameRegister(); // r2
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
+ Register feedback = r4;
+ Register scratch1 = r5;
+
+ __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
+ __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
+ __ b(ne, &try_array);
+ HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
+ &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ b(ne, &not_array);
+ HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, r8,
+ r9, true, &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ b(ne, &miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ false, receiver, name, feedback,
+ scratch1, r8, r9);
+
+ __ bind(&miss);
+ LoadIC::GenerateMiss(masm);
+}
+
+
+void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r1
+ Register key = VectorLoadICDescriptor::NameRegister(); // r2
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // r3
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // r0
+ Register feedback = r4;
+ Register scratch1 = r5;
+
+ __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
+ __ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
+ __ b(ne, &try_array);
+ HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
+ &miss);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ b(ne, &not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r8,
+ r9, true, &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ b(ne, &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, feedback);
+ __ b(ne, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
+ __ ldr(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r8,
+ r9, false, &miss);
+
+ __ bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -4788,7 +5050,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
Label promote_scheduled_exception;
- Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
@@ -4810,15 +5071,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(r5, ip);
__ b(ne, &delete_allocated_handles);
- // Check if the function scheduled an exception.
+ // Leave the API exit frame.
__ bind(&leave_exit_frame);
- __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
- __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
- __ ldr(r5, MemOperand(ip));
- __ cmp(r4, r5);
- __ b(ne, &promote_scheduled_exception);
- __ bind(&exception_handled);
-
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
__ ldr(cp, *context_restore_operand);
@@ -4830,15 +5084,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ mov(r4, Operand(stack_space));
}
__ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ ldr(r5, MemOperand(ip));
+ __ cmp(r4, r5);
+ __ b(ne, &promote_scheduled_exception);
+
__ mov(pc, lr);
+ // Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
- }
- __ jmp(&exception_handled);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index 4a340708f9..4bbfd375a8 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -45,6 +45,18 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
register uint32_t end asm("r1") = beg + size;
register uint32_t flg asm("r2") = 0;
+#ifdef __clang__
+ // This variant of the asm avoids a constant pool entry, which can be
+ // problematic when LTO'ing. It is also slightly shorter.
+ register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
+
+ asm volatile("svc 0\n"
+ :
+ : "r"(beg), "r"(end), "r"(flg), "r"(scno)
+ : "memory");
+#else
+  // Use a different variant of the asm with GCC because some versions don't
+ // support r7 as an asm input.
asm volatile(
// This assembly works for both ARM and Thumb targets.
@@ -62,6 +74,7 @@ void CpuFeatures::FlushICache(void* start, size_t size) {
: "r" (beg), "r" (end), "r" (flg), [scno] "i" (__ARM_NR_cacheflush)
: "memory");
#endif
+#endif
}
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index c9100576d2..d9c25c6588 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -12,12 +12,7 @@
namespace v8 {
namespace internal {
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
+void BreakLocation::SetDebugBreakAtReturn() {
// Patch the code changing the return from JS function sequence from
// mov sp, fp
// ldmia sp!, {fp, lr}
@@ -28,7 +23,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// blx ip
// <debug break return code entry point address>
// bkpt 0
- CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(
@@ -37,29 +32,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
}
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceInstructions);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
+void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from
// mov r2, r2
@@ -69,7 +42,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// ldr ip, [pc, #0]
// blx ip
// <debug break slot code entry point address>
- CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
patcher.masm()->blx(v8::internal::ip);
patcher.Emit(
@@ -77,13 +50,6 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
}
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index be05344e16..9359768e07 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -135,7 +135,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Save all general purpose registers before messing with them.
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 4e631b08a3..19f8c2f8e2 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -33,6 +33,7 @@
#if V8_TARGET_ARCH_ARM
#include "src/arm/constants-arm.h"
+#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
@@ -226,7 +227,7 @@ void Decoder::PrintShiftRm(Instruction* instr) {
void Decoder::PrintShiftImm(Instruction* instr) {
int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Value();
- int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
+ int imm = base::bits::RotateRight32(immed8, rotate);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%d", imm);
}
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index ce65e887f8..3720a2bde0 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -152,11 +152,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-inline void StackHandler::SetFp(Address slot, Address fp) {
- Memory::Address_at(slot) = fp;
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM_FRAMES_ARM_H_
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 5b56e43434..6ee8eb1cd6 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -107,7 +107,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
@@ -195,7 +196,7 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in r1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
- if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ if (info->scope()->is_script_scope()) {
__ push(r1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
@@ -240,6 +241,11 @@ void FullCodeGenerator::Generate() {
}
}
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@@ -248,6 +254,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
+ if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
+ --num_parameters;
+ ++rest_index;
+ }
+
__ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ mov(r2, Operand(Smi::FromInt(num_parameters)));
__ mov(r1, Operand(Smi::FromInt(rest_index)));
@@ -281,10 +292,6 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
@@ -1529,7 +1536,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
- CallLoadIC(CONTEXTUAL);
+ CallGlobalLoadIC(var->name());
context()->Plug(r0);
break;
}
@@ -2177,7 +2184,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
__ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
__ Push(load_name, r3, r0); // "throw", iter, except
@@ -2188,16 +2194,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(r0); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
+ EnterTryBlock(expr->index(), &l_catch);
+ const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(r0); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
+ const int generator_object_depth = kPointerSize + try_block_size;
__ ldr(r0, MemOperand(sp, generator_object_depth));
__ push(r0); // g
+ __ Push(Smi::FromInt(expr->index())); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
__ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
@@ -2205,12 +2212,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(r1, cp);
__ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(r0); // result
EmitReturnSequence();
__ bind(&l_resume); // received in r0
- __ PopTryHandler();
+ ExitTryBlock(expr->index());
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
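
The yield lowering no longer pushes a machine-level StackHandler; EnterTryBlock and ExitTryBlock record the protected code range in the side handler table, the reserved stack space is expressed as TryCatch::kElementCount words, and the handler index is now passed explicitly to Runtime::kSuspendJSGeneratorObject (hence the argument count change from 1 to 2). A conceptual sketch of the range-based lookup that replaces pushed handlers, with invented types (the real table also tracks depth and is organized so the correct innermost entry wins):

#include <vector>

// One entry per protected code range; at throw time the runtime searches the
// table for a range covering the faulting code offset instead of popping a
// machine-level handler chain.
struct HandlerRange {
  int start;    // first covered code offset
  int end;      // one past the last covered offset
  int handler;  // code offset of the catch/finally entry
};

int FindHandler(const std::vector<HandlerRange>& table, int pc_offset) {
  for (const HandlerRange& range : table) {
    if (range.start <= pc_offset && pc_offset < range.end) return range.handler;
  }
  return -1;  // no handler: the exception escapes this frame
}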
@@ -2570,6 +2577,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
__ push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+ // The static prototype property is read-only. The non-computed property
+ // name case is handled in the parser. Since this is the only case where we
+ // need to check for an own read-only property, we special-case it here so
+ // that the check is not repeated for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ push(r0);
+ }
+
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@@ -2713,25 +2730,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(r0);
- __ mov(r0, Operand(var->name()));
- __ Push(cp, r0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2748,6 +2746,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r3, location);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &const_error);
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2769,8 +2782,33 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(var->mode() == CONST_LEGACY);
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(r0);
+ __ mov(r0, Operand(var->name()));
+ __ Push(cp, r0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r1);
+ __ ldr(r2, location);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ b(ne, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ }
+ // Silently ignore store in sloppy mode.
}
}
@@ -2900,7 +2938,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ Push(isolate()->factory()->undefined_value());
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ push(ip);
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -3260,7 +3299,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- if (!ValidateSuperCall(expr)) return;
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
@@ -3764,8 +3802,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
+ Register instance_type = r2;
+ __ GetMapConstructor(r0, r0, r1, instance_type);
+ __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
__ b(ne, &non_function_constructor);
// r0 now contains the constructor function. Grab the
@@ -4062,7 +4101,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4109,7 +4148,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4284,7 +4323,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ bind(&done);
context()->Plug(r0);
@@ -4570,18 +4609,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- if (expr->function() != NULL &&
- expr->function()->intrinsic_type == Runtime::INLINE) {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
__ ldr(receiver, GlobalObjectOperand());
@@ -4604,7 +4636,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ str(r0, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
- int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@@ -4619,15 +4650,29 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r0);
+
} else {
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(r0);
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(r0);
+ }
+ }
}
}
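
The rewritten VisitCallRuntime dispatches inlineable intrinsics through an X-macro: FOR_EACH_FULL_CODE_INTRINSIC expands CALL_INTRINSIC_GENERATOR once per intrinsic, generating one switch case each, and anything not in the list falls through to a plain C runtime call. A self-contained illustration of the pattern with made-up intrinsic names:

#include <cstdio>

#define FOR_EACH_INTRINSIC(V) V(IsSmi) V(ClassOf)

enum IntrinsicId {
#define DEFINE_ID(Name) kInline##Name,
  FOR_EACH_INTRINSIC(DEFINE_ID)
#undef DEFINE_ID
  kUnhandledIntrinsic
};

void Dispatch(IntrinsicId id) {
  switch (id) {
#define CASE(Name)                  \
  case kInline##Name:               \
    std::printf("Emit%s\n", #Name); \
    return;
    FOR_EACH_INTRINSIC(CASE)
#undef CASE
    default:
      std::printf("generic runtime call\n");  // the unhandled-intrinsic path
  }
}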
@@ -5273,20 +5318,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ mov(ip, Operand(pending_message_obj));
__ ldr(r1, MemOperand(ip));
__ push(r1);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(ip, Operand(has_pending_message));
- STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
- __ ldrb(r1, MemOperand(ip));
- __ SmiTag(r1);
- __ push(r1);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(ip, Operand(pending_message_script));
- __ ldr(r1, MemOperand(ip));
- __ push(r1);
}
@@ -5294,20 +5325,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(r1));
// Restore pending message from stack.
__ pop(r1);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(ip, Operand(pending_message_script));
- __ str(r1, MemOperand(ip));
-
- __ pop(r1);
- __ SmiUntag(r1);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(ip, Operand(has_pending_message));
- STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
- __ strb(r1, MemOperand(ip));
-
- __ pop(r1);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
@@ -5325,34 +5342,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ ldr(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ bl(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
static Address GetInterruptImmediateLoadAddress(Address pc) {
Address load_address = pc - 2 * Assembler::kInstrSize;
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index da0cba9d10..64d00556c3 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -227,6 +227,12 @@ void InternalArrayConstructorDescriptor::Initialize(
}
+void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r1, r0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 2e097f9302..b45e1c5226 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -2140,14 +2140,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object =
@@ -2162,16 +2154,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to check the value in the cell in the case where we perform
- // a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index fc8b300f35..fbf648f0e4 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@@ -142,7 +141,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1704,13 +1702,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@@ -1732,21 +1723,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 5b6ed2caf3..202631837d 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -119,7 +120,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
+ if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -345,54 +346,40 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
- if (needs_frame.is_bound()) {
- __ b(&needs_frame);
- } else {
- __ bind(&needs_frame);
- Comment(";;; call deopt with frame");
- __ PushFixedFrame();
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(ip);
- __ add(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ bind(&call_deopt_entry);
- // Add the base address to the offset previously loaded in
- // entry_offset.
- __ add(entry_offset, entry_offset,
- Operand(ExternalReference::ForDeoptEntry(base)));
- __ blx(entry_offset);
- }
-
- masm()->CheckConstPool(false, false);
+ Comment(";;; call deopt with frame");
+ __ PushFixedFrame();
+ __ bl(&needs_frame);
} else {
- // The last entry can fall through into `call_deopt_entry`, avoiding a
- // branch.
- bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
-
- if (need_branch) __ b(&call_deopt_entry);
-
- masm()->CheckConstPool(false, !need_branch);
+ __ bl(&call_deopt_entry);
}
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
+ masm()->CheckConstPool(false, false);
}
- if (!call_deopt_entry.is_bound()) {
- Comment(";;; call deopt");
- __ bind(&call_deopt_entry);
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(ip);
+ __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ }
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
- // Add the base address to the offset previously loaded in entry_offset.
- __ add(entry_offset, entry_offset,
- Operand(ExternalReference::ForDeoptEntry(base)));
- __ blx(entry_offset);
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
}
+
+ // Add the base address to the offset previously loaded in entry_offset.
+ __ add(entry_offset, entry_offset,
+ Operand(ExternalReference::ForDeoptEntry(base)));
+ __ bx(entry_offset);
}
// Force constant pool emission at the end of the deopt jump table to make
@@ -893,8 +880,8 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ stop("trap_on_deopt", condition);
}
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
@@ -902,6 +889,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
@@ -2729,10 +2717,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+ Register instance_type = ip;
+ __ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
- __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
+ __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
__ b(ne, is_true);
} else {
@@ -2838,8 +2827,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// root array to force relocation to be able to later patch with
// the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
+ __ mov(ip, Operand(cell));
+ __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(map, Operand(ip));
__ b(ne, &cache_miss);
__ bind(deferred->load_bool()); // Label for calculating code patching.
@@ -2993,18 +2982,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
- __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
- }
-}
-
-
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@@ -3034,36 +3011,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = scratch0();
-
- // Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We use a temp to check the payload (CompareRoot might clobber ip).
- Register payload = ToRegister(instr->temp());
- __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
- }
-
- // Store the value.
- __ str(value, FieldMemOperand(cell, Cell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3156,8 +3109,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL,
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -3448,7 +3402,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4309,7 +4265,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
+ Handle<Code> ic =
+ StoreIC::initialize_stub(isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -4530,8 +4488,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
}
@@ -5239,7 +5198,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ mov(ip, Operand(Handle<Object>(cell)));
+ __ mov(ip, Operand(cell));
__ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index c8fb60dafa..b3cacc8cef 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -439,6 +439,7 @@ void MacroAssembler::LoadRoot(Register destination,
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
str(source, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
@@ -1395,44 +1396,22 @@ void MacroAssembler::DebugBreak() {
}
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
+void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // For the JSEntry handler, we must preserve r0-r4, r5-r6 are available.
- // We will build up the handler from the bottom by pushing on the stack.
- // Set up the code object (r5) and the state (r6) for pushing.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- mov(r5, Operand(CodeObject()));
- mov(r6, Operand(state));
-
- // Push the frame pointer, context, state, and code object.
- if (kind == StackHandler::JS_ENTRY) {
- mov(cp, Operand(Smi::FromInt(0))); // Indicates no context.
- mov(ip, Operand::Zero()); // NULL frame pointer.
- stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | ip.bit());
- } else {
- stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit());
- }
// Link the current handler as the next handler.
mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ldr(r5, MemOperand(r6));
push(r5);
+
// Set this new handler as the current one.
str(sp, MemOperand(r6));
}
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(r1);
mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
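
A stack handler shrinks here from five words (next, code, state, context, fp, per the deleted STATIC_ASSERTs) to a single next pointer: the machine-level chain now only marks where handlers live, while code offset, context, and frame pointer are recovered at throw time from the handler table by the runtime. Consistently, Throw and ThrowUncatchable are deleted below, and the arm64 CEntryStub later in this patch calls Runtime::kFindExceptionHandler instead. A conceptual sketch of the surviving linked-list discipline, using standalone stand-ins rather than V8's declarations (V8 keeps the chain head in per-isolate storage at Isolate::kHandlerAddress):

struct StackHandler { StackHandler* next; };

static StackHandler* handler_head = nullptr;  // stand-in for the isolate slot

void PushStackHandler(StackHandler* slot) {
  slot->next = handler_head;  // link the current handler as the next handler
  handler_head = slot;        // the new handler becomes the current one
}

void PopStackHandler() {
  handler_head = handler_head->next;  // unlink the topmost handler
}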
@@ -1441,98 +1420,6 @@ void MacroAssembler::PopTryHandler() {
}
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // r0 = exception, r1 = code object, r2 = state.
-
- ConstantPoolUnavailableScope constant_pool_unavailable(this);
- if (FLAG_enable_ool_constant_pool) {
- ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset)); // Constant pool.
- }
- ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset)); // Handler table.
- add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
- ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2)); // Smi-tagged offset.
- add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- add(pc, r1, Operand::SmiUntag(r2)); // Jump
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in r0.
- if (!value.is(r0)) {
- mov(r0, value);
- }
- // Drop the stack pointer to the top of the top handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
- // Restore the next handler.
- pop(r2);
- str(r2, MemOperand(r3));
-
- // Get the code object (r1) and state (r2). Restore the context and frame
- // pointer.
- ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
- // or cp.
- tst(cp, cp);
- str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in r0.
- if (!value.is(r0)) {
- mov(r0, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- mov(r3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ldr(sp, MemOperand(r3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind);
- bind(&fetch_next);
- ldr(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- ldr(r2, MemOperand(sp, StackHandlerConstants::kStateOffset));
- tst(r2, Operand(StackHandler::KindField::kMask));
- b(ne, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(r2);
- str(r2, MemOperand(r3));
- // Get the code object (r1) and state (r2). Clear the context and frame
- // pointer (0 was saved in the handler).
- ldm(ia_w, sp, r1.bit() | r2.bit() | cp.bit() | fp.bit());
-
- JumpToHandlerEntry();
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss) {
@@ -2292,6 +2179,20 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
}
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp, Register temp2) {
+ Label done, loop;
+ ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
+ bind(&loop);
+ JumpIfSmi(result, &done);
+ CompareObjectType(result, temp, temp2, MAP_TYPE);
+ b(ne, &done);
+ ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
+ b(&loop);
+ bind(&done);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -2345,7 +2246,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ GetMapConstructor(result, result, scratch, ip);
}
// All done.
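
GetMapConstructor exists because the map's plain constructor slot is replaced by a combined constructor-or-back-pointer field (Map::kConstructorOrBackPointerOffset above): for maps created by transitions the slot holds another map, and the real constructor is found by walking that chain until a non-map value appears. A C++ sketch of the loop the assembly implements, using stand-in types rather than V8's object model:

// Stand-in for a heap value that is either a map (carrying a back pointer /
// constructor slot of its own) or the constructor itself.
struct HeapValue {
  bool is_map;
  HeapValue* constructor_or_back_pointer;  // meaningful only when is_map
};

// Mirrors the JumpIfSmi / CompareObjectType(MAP_TYPE) loop: follow map-valued
// back pointers until the slot holds something that is not a map.
HeapValue* GetMapConstructor(HeapValue* map) {
  HeapValue* result = map->constructor_or_back_pointer;
  while (result->is_map) {
    result = result->constructor_or_back_pointer;
  }
  return result;
}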
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 5de013e270..f92aab4eb0 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -643,19 +643,12 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
+ // Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
- void PopTryHandler();
-
- // Passes thrown value to the handler of top of the try handler chain.
- void Throw(Register value);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain.
- void ThrowUncatchable(Register value);
+ void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -811,6 +804,11 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Support functions.
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done, and |temp2| its instance type.
+ void GetMapConstructor(Register result, Register map, Register temp,
+ Register temp2);
+
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
@@ -1464,10 +1462,6 @@ class MacroAssembler: public Assembler {
Register bitmap_reg,
Register mask_reg);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 209b5d2ae8..4c681ae764 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -13,6 +13,7 @@
#include "src/arm/constants-arm.h"
#include "src/arm/simulator-arm.h"
#include "src/assembler.h"
+#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
@@ -1506,7 +1507,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
int32_t Simulator::GetImm(Instruction* instr, bool* carry_out) {
int rotate = instr->RotateValue() * 2;
int immed8 = instr->Immed8Value();
- int imm = (immed8 >> rotate) | (immed8 << (32 - rotate));
+ int imm = base::bits::RotateRight32(immed8, rotate);
*carry_out = (rotate == 0) ? c_flag_ : (imm < 0);
return imm;
}
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index 4547efef0c..7d8d81e38d 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -18,7 +18,12 @@ bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
- UNIMPLEMENTED();
+ // On arm64 only internal references need extra work.
+ DCHECK(RelocInfo::IsInternalReference(rmode_));
+
+ // Absolute code pointer inside code object moves with the code object.
+ intptr_t* p = reinterpret_cast<intptr_t*>(pc_);
+ *p += delta; // Relocate entry.
}
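
RelocInfo::apply stops being UNIMPLEMENTED on arm64 because this patch introduces internal references: absolute addresses pointing into the same code object (emitted by the new dcptr below), which must move when the code object moves; kApplyMask is widened accordingly in assembler-arm64.cc. The fixup itself is only a pointer adjustment, sketched in isolation:

#include <cstdint>

// When a code object moves by delta bytes, every absolute pointer into it
// must be shifted by the same amount; pc-relative entries need no fixup.
void ApplyDelta(intptr_t* slot, intptr_t delta) { *slot += delta; }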
@@ -654,6 +659,12 @@ void Assembler::deserialization_set_special_target_at(
}
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ Memory::Address_at(pc) = target;
+}
+
+
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
Address target,
@@ -733,12 +744,24 @@ void RelocInfo::set_target_object(Object* target,
}
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::target_internal_reference() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return Memory::Address_at(pc_);
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -826,11 +849,14 @@ void RelocInfo::set_call_address(Address target) {
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) ||
- IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) ||
- IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, host_, NULL);
+ DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_));
+ if (IsInternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else {
+ Assembler::set_target_address_at(pc_, host_, NULL);
+ }
}
@@ -838,7 +864,7 @@ bool RelocInfo::IsPatchedReturnSequence() {
// The sequence must be:
// ldr ip0, [pc, #offset]
// blr ip0
- // See arm64/debug-arm64.cc BreakLocationIterator::SetDebugBreakAtReturn().
+ // See arm64/debug-arm64.cc BreakLocation::SetDebugBreakAtReturn().
Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
Instruction* i2 = i1->following();
return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
@@ -862,6 +888,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ visitor->VisitInternalReference(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
@@ -885,6 +913,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ StaticVisitor::VisitInternalReference(this);
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index bba78c89e4..d072433633 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -171,7 +171,7 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-const int RelocInfo::kApplyMask = 0;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
bool RelocInfo::IsCodedSpecially() {
@@ -188,26 +188,6 @@ bool RelocInfo::IsInConstantPool() {
}
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- UNIMPLEMENTED();
-}
-
-
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
@@ -752,7 +732,15 @@ void Assembler::bind(Label* label) {
DCHECK(prevlinkoffset >= 0);
// Update the link to point to the label.
- link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ if (link->IsUnresolvedInternalReference()) {
+ // Internal references do not get patched to an instruction but directly
+ // to an address.
+ internal_reference_positions_.push_back(linkoffset);
+ PatchingAssembler patcher(link, 2);
+ patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
+ } else {
+ link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ }
// Link the label to the previous link in the chain.
if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
@@ -2080,6 +2068,50 @@ void Assembler::ucvtf(const FPRegister& fd,
}
+void Assembler::dcptr(Label* label) {
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ if (label->is_bound()) {
+ // The label is bound, so it does not need to be updated and the internal
+ // reference should be emitted.
+ //
+ // In this case, label->pos() returns the offset of the label from the
+ // start of the buffer.
+ internal_reference_positions_.push_back(pc_offset());
+ dc64(reinterpret_cast<uintptr_t>(buffer_ + label->pos()));
+ } else {
+ int32_t offset;
+ if (label->is_linked()) {
+ // The label is linked, so the internal reference should be added
+ // onto the end of the label's link chain.
+ //
+ // In this case, label->pos() returns the offset of the last linked
+ // instruction from the start of the buffer.
+ offset = label->pos() - pc_offset();
+ DCHECK(offset != kStartOfLabelLinkChain);
+ } else {
+ // The label is unused, so it now becomes linked and the internal
+ // reference is at the start of the new link chain.
+ offset = kStartOfLabelLinkChain;
+ }
+ // The instruction at pc is now the last link in the label's chain.
+ label->link_to(pc_offset());
+
+ // Traditionally the offset to the previous instruction in the chain is
+ // encoded in the instruction payload (e.g. the branch range), but internal
+ // references are not instructions, so while unbound they are encoded as
+ // two consecutive brk instructions whose two 16-bit immediates hold the
+ // offset.
+ offset >>= kInstructionSizeLog2;
+ DCHECK(is_int32(offset));
+ uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
+ uint32_t low16 = unsigned_bitextract_32(15, 0, offset);
+
+ brk(high16);
+ brk(low16);
+ }
+}
+
+
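
While a label is unbound, dcptr cannot store the link-chain offset the usual way, since the 64-bit data slot is not an instruction with an immediate field to borrow; instead the 32-bit instruction-count offset is split across the 16-bit immediates of two consecutive brk instructions and reassembled when bind() resolves the label. A standalone sketch of that split and its inverse (helper names invented for illustration; two's-complement casts make negative backward offsets round-trip):

#include <cstdint>

// Split a 32-bit offset into the two 16-bit brk immediates.
uint32_t High16(int32_t offset) {
  return (static_cast<uint32_t>(offset) >> 16) & 0xFFFF;
}
uint32_t Low16(int32_t offset) {
  return static_cast<uint32_t>(offset) & 0xFFFF;
}

// Recombine the immediates once the label is bound.
int32_t Reassemble(uint32_t high16, uint32_t low16) {
  return static_cast<int32_t>((high16 << 16) | low16);
}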
// Note:
// Below, a difference in case for the same letter indicates a
// negated bit.
@@ -2839,6 +2871,12 @@ void Assembler::GrowBuffer() {
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
+ // Relocate internal references.
+ for (auto pos : internal_reference_positions_) {
+ intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos);
+ *p += pc_delta;
+ }
+
// Pending relocation entries are also relative, no need to relocate.
}
@@ -2848,6 +2886,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
if (((rmode >= RelocInfo::JS_RETURN) &&
(rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
+ (rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) ||
(rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_REASON)) {
@@ -2857,6 +2896,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsDeoptReason(rmode)
|| RelocInfo::IsPosition(rmode)
+ || RelocInfo::IsInternalReference(rmode)
|| RelocInfo::IsConstPool(rmode)
|| RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index 996898553c..d672589462 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -5,14 +5,15 @@
#ifndef V8_ARM64_ASSEMBLER_ARM64_H_
#define V8_ARM64_ASSEMBLER_ARM64_H_
+#include <deque>
#include <list>
#include <map>
#include <vector>
#include "src/arm64/instructions-arm64.h"
#include "src/assembler.h"
+#include "src/compiler.h"
#include "src/globals.h"
-#include "src/serialize.h"
#include "src/utils.h"
@@ -900,6 +901,11 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target);
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
// All addresses in the constant pool are the same size as pointers.
static const int kSpecialTargetSize = kPointerSize;
@@ -951,7 +957,9 @@ class Assembler : public AssemblerBase {
// Number of instructions generated for the return sequence in
// FullCodeGenerator::EmitReturnSequence.
- static const int kJSRetSequenceInstructions = 7;
+ static const int kJSReturnSequenceInstructions = 7;
+ static const int kJSReturnSequenceLength =
+ kJSReturnSequenceInstructions * kInstructionSize;
// Distance between start of patched return sequence and the emitted address
// to jump to.
static const int kPatchReturnSequenceAddressOffset = 0;
@@ -959,7 +967,7 @@ class Assembler : public AssemblerBase {
// Number of instructions necessary to be able to later patch it to a call.
// See DebugCodegen::GenerateSlot() and
- // BreakLocationIterator::SetDebugBreakAtSlot().
+ // BreakLocation::SetDebugBreakAtSlot().
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstructionSize;
@@ -1010,7 +1018,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
+ void RecordDeoptReason(const int reason, const SourcePosition position);
int buffer_space() const;
@@ -1743,6 +1751,9 @@ class Assembler : public AssemblerBase {
// Emit 64 bits of data in the instruction stream.
void dc64(uint64_t data) { EmitData(&data, sizeof(data)); }
+ // Emit an address in the instruction stream.
+ void dcptr(Label* label);
+
// Copy a string into the instruction stream, including the terminating NULL
// character. The instruction pointer (pc_) is then aligned correctly for
// subsequent instructions.
@@ -2159,6 +2170,10 @@ class Assembler : public AssemblerBase {
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
+ // Internal reference positions, required for (potential) patching in
+ // GrowBuffer(); contains only those internal references whose labels
+ // are already bound.
+ std::deque<int> internal_reference_positions_;
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted
diff --git a/deps/v8/src/arm64/builtins-arm64.cc b/deps/v8/src/arm64/builtins-arm64.cc
index 89e304051a..dfb59c0504 100644
--- a/deps/v8/src/arm64/builtins-arm64.cc
+++ b/deps/v8/src/arm64/builtins-arm64.cc
@@ -1324,53 +1324,106 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- ASM_LOCATION("Builtins::Generate_FunctionApply");
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
- const int kArgsOffset = 2 * kPointerSize;
- const int kReceiverOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+ const int calleeOffset) {
+ Register argc = x0;
+ Register function = x15;
+
+ // Check the stack for overflow.
+ // We are not trying to catch interruptions (e.g. debug break and
+ // preemption) here, so the "real stack limit" is checked.
+ Label enough_stack_space;
+ __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
+ __ Ldr(function, MemOperand(fp, calleeOffset));
+ // Make x10 the space we have left. The stack might already be overflowed
+ // here which will cause x10 to become negative.
+ // TODO(jbramley): Check that the stack usage here is safe.
+ __ Sub(x10, jssp, x10);
+ // Check if the arguments will overflow the stack.
+ __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
+ __ B(gt, &enough_stack_space);
+ // There is not enough stack space, so use a builtin to throw an appropriate
+ // error.
+ __ Push(function, argc);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+ // We should never return from the STACK_OVERFLOW builtin.
+ if (__ emit_debug_code()) {
+ __ Unreachable();
+ }
+
+ __ Bind(&enough_stack_space);
+}
+
+
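
Generate_CheckStackOverflow is factored out so that Function.prototype.apply, Reflect.apply, and Reflect.construct can share one overflow check. In scalar form it is a signed comparison, sketched below with plain integers rather than V8's registers and roots; the subtraction can go negative when the stack is already past the limit, which the signed compare (the assembly's B(gt, ...)) handles:

#include <cstdint>

// Enough space iff the gap between sp and the "real" stack limit exceeds
// what the arguments will occupy (argc pointer-sized slots).
bool EnoughStackSpace(intptr_t sp, intptr_t real_limit, intptr_t argc) {
  intptr_t available = sp - real_limit;  // negative if already overflowed
  return available > argc * static_cast<intptr_t>(sizeof(void*));
}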
+static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int argumentsOffset,
+ const int indexOffset,
+ const int limitOffset) {
+ Label entry, loop;
+ Register current = x0;
+ __ Ldr(current, MemOperand(fp, indexOffset));
+ __ B(&entry);
+
+ __ Bind(&loop);
+ // Load the current argument from the arguments array and push it.
+ // TODO(all): Couldn't we optimize this for JS arrays?
+
+ __ Ldr(x1, MemOperand(fp, argumentsOffset));
+ __ Push(x1, current);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ Push(x0);
+
+ // Use inline caching to access the arguments.
+ __ Ldr(current, MemOperand(fp, indexOffset));
+ __ Add(current, current, Smi::FromInt(1));
+ __ Str(current, MemOperand(fp, indexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ Bind(&entry);
+ __ Ldr(x1, MemOperand(fp, limitOffset));
+ __ Cmp(current, x1);
+ __ B(ne, &loop);
+
+ // On exit, the pushed arguments count is in x0, untagged
+ __ SmiUntag(current);
+}
+
+
+static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
+ const int kFormalParameters = targetIsArgument ? 3 : 2;
+ const int kStackSize = kFormalParameters + 1;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ const int kFunctionOffset = kReceiverOffset + kPointerSize;
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+
Register args = x12;
Register receiver = x14;
Register function = x15;
// Get the length of the arguments via a builtin call.
__ Ldr(function, MemOperand(fp, kFunctionOffset));
- __ Ldr(args, MemOperand(fp, kArgsOffset));
+ __ Ldr(args, MemOperand(fp, kArgumentsOffset));
__ Push(function, args);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ if (targetIsArgument) {
+ __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ } else {
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ }
Register argc = x0;
- // Check the stack for overflow.
- // We are not trying to catch interruptions (e.g. debug break and
- // preemption) here, so the "real stack limit" is checked.
- Label enough_stack_space;
- __ LoadRoot(x10, Heap::kRealStackLimitRootIndex);
- __ Ldr(function, MemOperand(fp, kFunctionOffset));
- // Make x10 the space we have left. The stack might already be overflowed
- // here which will cause x10 to become negative.
- // TODO(jbramley): Check that the stack usage here is safe.
- __ Sub(x10, jssp, x10);
- // Check if the arguments will overflow the stack.
- __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
- __ B(gt, &enough_stack_space);
- // There is not enough stack space, so use a builtin to throw an appropriate
- // error.
- __ Push(function, argc);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- // We should never return from the APPLY_OVERFLOW builtin.
- if (__ emit_debug_code()) {
- __ Unreachable();
- }
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
- __ Bind(&enough_stack_space);
// Push current limit and index.
__ Mov(x1, 0); // Initial index.
__ Push(argc, x1);
@@ -1424,33 +1477,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ Push(receiver);
// Copy all arguments from the array to the stack.
- Label entry, loop;
- Register current = x0;
- __ Ldr(current, MemOperand(fp, kIndexOffset));
- __ B(&entry);
-
- __ Bind(&loop);
- // Load the current argument from the arguments array and push it.
- // TODO(all): Couldn't we optimize this for JS arrays?
-
- __ Ldr(x1, MemOperand(fp, kArgsOffset));
- __ Push(x1, current);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ Push(x0);
-
- // Use inline caching to access the arguments.
- __ Ldr(current, MemOperand(fp, kIndexOffset));
- __ Add(current, current, Smi::FromInt(1));
- __ Str(current, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ Bind(&entry);
- __ Ldr(x1, MemOperand(fp, kLimitOffset));
- __ Cmp(current, x1);
- __ B(ne, &loop);
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// At the end of the loop, the number of arguments is stored in 'current',
// represented as a smi.
@@ -1460,12 +1488,11 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Call the function.
Label call_proxy;
- ParameterCount actual(current);
- __ SmiUntag(current);
+ ParameterCount actual(x0);
__ JumpIfNotObjectType(function, x10, x11, JS_FUNCTION_TYPE, &call_proxy);
__ InvokeFunction(function, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
- __ Drop(3);
+ __ Drop(kStackSize);
__ Ret();
// Call the function proxy.
@@ -1479,11 +1506,93 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
- __ Drop(3);
+ __ Drop(kStackSize);
__ Ret();
}
+static void Generate_ConstructHelper(MacroAssembler* masm) {
+ const int kFormalParameters = 3;
+ const int kStackSize = kFormalParameters + 1;
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+
+ const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
+ const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+
+ // Is x11 safe to use?
+ Register newTarget = x11;
+ Register args = x12;
+ Register function = x15;
+
+ // If newTarget is not supplied, set it to constructor
+ Label validate_arguments;
+ __ Ldr(x0, MemOperand(fp, kNewTargetOffset));
+ __ CompareRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ B(ne, &validate_arguments);
+ __ Ldr(x0, MemOperand(fp, kFunctionOffset));
+ __ Str(x0, MemOperand(fp, kNewTargetOffset));
+
+ // Validate arguments
+ __ Bind(&validate_arguments);
+ __ Ldr(function, MemOperand(fp, kFunctionOffset));
+ __ Ldr(args, MemOperand(fp, kArgumentsOffset));
+ __ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
+ __ Push(function, args, newTarget);
+ __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+ Register argc = x0;
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
+
+ // Push current limit and index, constructor & newTarget
+ __ Mov(x1, 0); // Initial index.
+ __ Ldr(newTarget, MemOperand(fp, kNewTargetOffset));
+ __ Push(argc, x1, newTarget, function);
+
+ // Copy all arguments from the array to the stack.
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+
+ __ Ldr(x1, MemOperand(fp, kFunctionOffset));
+ // Use undefined feedback vector
+ __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+
+ // Call the function.
+ CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+ }
+ __ Drop(kStackSize);
+ __ Ret();
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_FunctionApply");
+ Generate_ApplyHelper(masm, false);
+}
+
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_ReflectApply");
+ Generate_ApplyHelper(masm, true);
+}
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ ASM_LOCATION("Builtins::Generate_ReflectConstruct");
+ Generate_ConstructHelper(masm);
+}
+
+
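As a rough sketch of the new.target defaulting rule Generate_ConstructHelper implements for Reflect.construct(target, args[, newTarget]) — with stand-in types, not the real V8 heap API:

// Hedged sketch: when newTarget is omitted (undefined), the spec falls
// back to the target constructor itself. Object here is a placeholder.
struct Object {
  bool is_undefined;
  bool IsUndefined() const { return is_undefined; }
};

Object* ResolveNewTarget(Object* function, Object* new_target) {
  return new_target->IsUndefined() ? function : new_target;
}

This mirrors the branch over kNewTargetOffset above: load new.target, compare against undefined, and overwrite the slot with the function when the comparison succeeds.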
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index c50a30a042..4247aec165 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -11,6 +11,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -1188,28 +1189,28 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ Bind(&exception_returned);
- // Retrieve the pending exception.
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
- const Register& exception = result;
- const Register& exception_address = x11;
- __ Mov(exception_address, Operand(pending_exception_address));
- __ Ldr(exception, MemOperand(exception_address));
-
- // Clear the pending exception.
- __ Mov(x10, Operand(isolate()->factory()->the_hole_value()));
- __ Str(x10, MemOperand(exception_address));
-
- // x0 exception The exception descriptor.
- // x21 argv
- // x22 argc
- // x23 target
-
- // Special handling of termination exceptions, which are uncatchable by
- // JavaScript code.
- Label throw_termination_exception;
- __ Cmp(exception, Operand(isolate()->factory()->termination_exception()));
- __ B(eq, &throw_termination_exception);
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set x0 to
+ // contain the current pending exception; don't clobber it.
+ ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
+ DCHECK(csp.Is(masm->StackPointer()));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ Mov(x0, 0); // argc.
+ __ Mov(x1, 0); // argv.
+ __ Mov(x2, ExternalReference::isolate_address(isolate()));
+ __ CallCFunction(find_handler, 3);
+ }
// We didn't execute a return case, so the stack frame hasn't been updated
// (except for the return address slot). However, we don't need to initialize
@@ -1217,18 +1218,29 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// unwinds the stack.
__ SetStackPointer(jssp);
- ASM_LOCATION("Throw normal");
- __ Mov(argv, 0);
- __ Mov(argc, 0);
- __ Mov(target, 0);
- __ Throw(x0, x10, x11, x12, x13);
-
- __ Bind(&throw_termination_exception);
- ASM_LOCATION("Throw termination");
- __ Mov(argv, 0);
- __ Mov(argc, 0);
- __ Mov(target, 0);
- __ ThrowUncatchable(x0, x10, x11, x12, x13);
+ // Retrieve the handler context, SP and FP.
+ __ Mov(cp, Operand(pending_handler_context_address));
+ __ Ldr(cp, MemOperand(cp));
+ __ Mov(jssp, Operand(pending_handler_sp_address));
+ __ Ldr(jssp, MemOperand(jssp));
+ __ Mov(fp, Operand(pending_handler_fp_address));
+ __ Ldr(fp, MemOperand(fp));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // cp will be 0 for non-JS frames, in which case the store below is skipped.
+ Label not_js_frame;
+ __ Cbz(cp, &not_js_frame);
+ __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Bind(&not_js_frame);
+
+ // Compute the handler entry address and jump to it.
+ __ Mov(x10, Operand(pending_handler_code_address));
+ __ Ldr(x10, MemOperand(x10));
+ __ Mov(x11, Operand(pending_handler_offset_address));
+ __ Ldr(x11, MemOperand(x11));
+ __ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
+ __ Add(x10, x10, x11);
+ __ Br(x10);
}
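The final Br above jumps to an address assembled from three loads; the arithmetic is just tag stripping plus two offsets. A minimal sketch (kCodeHeaderSize is a placeholder for Code::kHeaderSize; the tag value matches V8's one-bit heap-object tagging):

#include <cstdint>

using Address = uint8_t*;
constexpr int kHeapObjectTag = 1;    // low tag bit on heap pointers
constexpr int kCodeHeaderSize = 96;  // placeholder for Code::kHeaderSize

// The pending-handler code pointer is heap-tagged; strip the tag and skip
// the Code header to reach the first instruction, then add the handler's
// code offset to get the entry point.
Address HandlerEntry(Address tagged_code, intptr_t handler_offset) {
  Address instruction_start = tagged_code + kCodeHeaderSize - kHeapObjectTag;
  return instruction_start + handler_offset;
}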
@@ -1333,10 +1345,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ LoadRoot(x0, Heap::kExceptionRootIndex);
__ B(&exit);
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ Bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the B(&invoke) above, which
// restores all callee-saved registers (including cp and fp) to their
@@ -1370,7 +1381,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Blr(x12);
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ Bind(&exit);
@@ -1454,7 +1465,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ Bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -2062,9 +2073,13 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(param_count, param_count_smi);
if (has_new_target()) {
+ __ Cmp(param_count, Operand(0));
+ Label skip_decrement;
+ __ B(eq, &skip_decrement);
// Skip new.target: it is not a part of arguments.
__ Sub(param_count, param_count, Operand(1));
__ SmiTag(param_count_smi, param_count);
+ __ Bind(&skip_decrement);
}
__ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2));
__ Add(params, x10, StandardFrameConstants::kCallerSPOffset);
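The skip_decrement guard added above protects the parameter count from underflow when new.target is present but no arguments were recorded. As a sketch (hypothetical helper, not V8 API):

// Only strip the implicit new.target slot when at least one slot exists;
// a zero count must stay zero rather than wrapping negative.
int EffectiveParamCount(int param_count, bool has_new_target) {
  if (has_new_target && param_count > 0) return param_count - 1;
  return param_count;
}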
@@ -2209,7 +2224,7 @@ void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
void RegExpExecStub::Generate(MacroAssembler* masm) {
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2661,18 +2676,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Cmp(x10, exception_value);
__ B(eq, &runtime);
- __ Str(x10, MemOperand(x11)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- Label termination_exception;
- __ JumpIfRoot(exception_value,
- Heap::kTerminationExceptionRootIndex,
- &termination_exception);
-
- __ Throw(exception_value, x10, x11, x12, x13);
-
- __ Bind(&termination_exception);
- __ ThrowUncatchable(exception_value, x10, x11, x12, x13);
+ // Otherwise, re-throw the pending exception.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ Bind(&failure);
__ Mov(x0, Operand(isolate()->factory()->null_value()));
@@ -2683,7 +2688,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Bind(&runtime);
__ PopCPURegList(used_callee_saved_registers);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3299,7 +3304,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
+ MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@@ -3307,8 +3312,13 @@ void StringCharCodeAtGenerator::GenerateSlow(
// If index is a heap number, try converting it to an integer.
__ JumpIfNotHeapNumber(index_, index_not_number_);
call_helper.BeforeCall(masm);
- // Save object_ on the stack and pass index_ as argument for runtime call.
- __ Push(object_, index_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ } else {
+ // Save object_ on the stack and pass index_ as argument for runtime call.
+ __ Push(object_, index_);
+ }
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@@ -3319,7 +3329,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Mov(index_, x0);
- __ Pop(object_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(object_, VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister());
+ } else {
+ __ Pop(object_);
+ }
// Reload the instance type.
__ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
@@ -3616,7 +3631,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ Bind(&miss);
@@ -3948,7 +3963,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Ret();
__ Bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// x1: result_length
@@ -4156,7 +4171,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime.
// Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@@ -4441,15 +4456,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawLoadStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawKeyedLoadStub stub(isolate());
+ stub.GenerateForTrampoline(masm);
}
@@ -4467,6 +4482,234 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
+void VectorRawLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register feedback, Register scratch1,
+ Register scratch2, Register scratch3,
+ bool is_polymorphic, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ Register receiver_map = scratch1;
+ Register cached_map = scratch2;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Bind(&compare_map);
+ __ Ldr(cached_map,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+ __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Cmp(receiver_map, cached_map);
+ __ B(ne, &start_polymorphic);
+ // Found a match; call the handler.
+ Register handler = feedback;
+ __ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(feedback);
+
+ Register length = scratch3;
+ __ Bind(&start_polymorphic);
+ __ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+ if (!is_polymorphic) {
+ __ Cmp(length, Operand(Smi::FromInt(2)));
+ __ B(eq, miss);
+ }
+
+ Register too_far = length;
+ Register pointer_reg = feedback;
+
+ // +-----+------+------+-----+-----+ ... ----+
+ // | map | len | wm0 | h0 | wm1 | hN |
+ // +-----+------+------+-----+-----+ ... ----+
+ // 0 1 2 len-1
+ // ^ ^
+ // | |
+ // pointer_reg too_far
+ // aka feedback scratch3
+ // also need receiver_map (aka scratch1)
+ // use cached_map (scratch2) to look in the weak map values.
+ __ Add(too_far, feedback,
+ Operand::UntagSmiAndScale(length, kPointerSizeLog2));
+ __ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add(pointer_reg, feedback,
+ FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
+
+ __ Bind(&next_loop);
+ __ Ldr(cached_map, MemOperand(pointer_reg));
+ __ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Cmp(receiver_map, cached_map);
+ __ B(ne, &prepare_next);
+ __ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
+ __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(handler);
+
+ __ Bind(&prepare_next);
+ __ Add(pointer_reg, pointer_reg, kPointerSize * 2);
+ __ Cmp(pointer_reg, too_far);
+ __ B(lt, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+
+ __ Bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
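The loop HandleArrayCases emits scans the (weak-cell-of-map, handler) pairs laid out in the diagram above. In plain C++ terms it is roughly the following (pointer types are stand-ins for the tagged heap values):

struct Map;
struct Code;
struct Pair { Map* map_from_weak_cell; Code* handler; };

// Pairs start at feedback element 2 and run to the array length; return
// the handler whose cached map matches the receiver map, or null on a miss
// so the caller can fall through to the IC miss path.
Code* FindPolymorphicHandler(Map* receiver_map, Pair* pairs, int count) {
  for (int i = 0; i < count; ++i) {
    if (pairs[i].map_from_weak_cell == receiver_map) return pairs[i].handler;
  }
  return nullptr;
}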
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register weak_cell, Register scratch,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label compare_smi_map;
+ Register receiver_map = scratch;
+ Register cached_map = weak_cell;
+
+ // Load the cached map out of the weak cell into the weak_cell register.
+ __ Ldr(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Cmp(cached_map, receiver_map);
+ __ B(ne, miss);
+
+ Register handler = weak_cell;
+ __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(weak_cell);
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ // TODO(mvstanton): does this hold on ARM?
+ __ Bind(&compare_smi_map);
+ __ JumpIfNotRoot(weak_cell, Heap::kHeapNumberMapRootIndex, miss);
+ __ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(handler);
+}
+
+
+void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
+ Register name = VectorLoadICDescriptor::NameRegister(); // x2
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
+ Register feedback = x4;
+ Register scratch1 = x5;
+
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kWeakCellMapRootIndex, &try_array);
+ HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
+ &miss);
+
+ // Is it a fixed array?
+ __ Bind(&try_array);
+ __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
+ HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, x6,
+ x7, true, &miss);
+
+ __ Bind(&not_array);
+ __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex, &miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ false, receiver, name, feedback,
+ scratch1, x6, x7);
+
+ __ Bind(&miss);
+ LoadIC::GenerateMiss(masm);
+}
+
+
+void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // x1
+ Register key = VectorLoadICDescriptor::NameRegister(); // x2
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // x3
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // x0
+ Register feedback = x4;
+ Register scratch1 = x5;
+
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ JumpIfNotRoot(scratch1, Heap::kWeakCellMapRootIndex, &try_array);
+ HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
+ &miss);
+
+ __ Bind(&try_array);
+ // Is it a fixed array?
+ __ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ Bind(&polymorphic);
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, x6,
+ x7, true, &miss);
+
+ __ Bind(&not_array);
+ // Is it generic?
+ __ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
+ &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ Bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Cmp(key, feedback);
+ __ B(ne, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
+ __ Ldr(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, x6,
+ x7, false, &miss);
+
+ __ Bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
+}
+
+
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@@ -5256,7 +5499,6 @@ static void CallApiFunctionAndReturn(
}
Label promote_scheduled_exception;
- Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
@@ -5278,6 +5520,7 @@ static void CallApiFunctionAndReturn(
__ Cmp(limit_reg, x1);
__ B(ne, &delete_allocated_handles);
+ // Leave the API exit frame.
__ Bind(&leave_exit_frame);
// Restore callee-saved registers.
__ Peek(x19, (spill_offset + 0) * kXRegSize);
@@ -5285,13 +5528,6 @@ static void CallApiFunctionAndReturn(
__ Peek(x21, (spill_offset + 2) * kXRegSize);
__ Peek(x22, (spill_offset + 3) * kXRegSize);
- // Check if the function scheduled an exception.
- __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
- __ Ldr(x5, MemOperand(x5));
- __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
- &promote_scheduled_exception);
- __ Bind(&exception_handled);
-
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
__ Ldr(cp, *context_restore_operand);
@@ -5302,6 +5538,13 @@ static void CallApiFunctionAndReturn(
}
__ LeaveExitFrame(false, x1, !restore_context);
+
+ // Check if the function scheduled an exception.
+ __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
+ __ Ldr(x5, MemOperand(x5));
+ __ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
+ &promote_scheduled_exception);
+
if (stack_space_operand != NULL) {
__ Drop(x2, 1);
} else {
@@ -5309,13 +5552,9 @@ static void CallApiFunctionAndReturn(
}
__ Ret();
+ // Re-throw by promoting a scheduled exception.
__ Bind(&promote_scheduled_exception);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
- }
- __ B(&exception_handled);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ Bind(&delete_allocated_handles);
diff --git a/deps/v8/src/arm64/debug-arm64.cc b/deps/v8/src/arm64/debug-arm64.cc
index dae5a28434..56e3c031ed 100644
--- a/deps/v8/src/arm64/debug-arm64.cc
+++ b/deps/v8/src/arm64/debug-arm64.cc
@@ -15,12 +15,8 @@ namespace internal {
#define __ ACCESS_MASM(masm)
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
+void BreakLocation::SetDebugBreakAtReturn() {
// Patch the code emitted by FullCodeGenerator::EmitReturnSequence, changing
// the return from JS function sequence from
// mov sp, fp
@@ -39,8 +35,8 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// The patching code must not overflow the space occupied by the return
// sequence.
- STATIC_ASSERT(Assembler::kJSRetSequenceInstructions >= 5);
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 5);
+ STATIC_ASSERT(Assembler::kJSReturnSequenceInstructions >= 5);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 5);
byte* entry =
debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry();
@@ -59,27 +55,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
}
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- // Reset the code emitted by EmitReturnSequence to its original state.
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSRetSequenceInstructions);
-}
-
-
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
+void BreakLocation::SetDebugBreakAtSlot() {
// Patch the code emitted by DebugCodegen::GenerateSlots, changing the debug
// break slot code from
// mov x0, x0 @ nop DEBUG_BREAK_NOP
@@ -99,7 +75,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// The patching code must not overflow the space occupied by the return
// sequence.
STATIC_ASSERT(Assembler::kDebugBreakSlotInstructions >= 4);
- PatchingAssembler patcher(reinterpret_cast<Instruction*>(rinfo()->pc()), 4);
+ PatchingAssembler patcher(reinterpret_cast<Instruction*>(pc()), 4);
byte* entry =
debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry();
@@ -117,13 +93,6 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
}
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs,
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 8c4b776efe..b28d6f1d8b 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -115,7 +115,7 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
#define __ masm()->
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// TODO(all): This code needs to be revisited. We probably only need to save
diff --git a/deps/v8/src/arm64/frames-arm64.h b/deps/v8/src/arm64/frames-arm64.h
index 8d4ce86197..883079c9be 100644
--- a/deps/v8/src/arm64/frames-arm64.h
+++ b/deps/v8/src/arm64/frames-arm64.h
@@ -99,11 +99,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-inline void StackHandler::SetFp(Address slot, Address fp) {
- Memory::Address_at(slot) = fp;
-}
-
-
} } // namespace v8::internal
#endif // V8_ARM64_FRAMES_ARM64_H_
diff --git a/deps/v8/src/arm64/full-codegen-arm64.cc b/deps/v8/src/arm64/full-codegen-arm64.cc
index 63b6a98376..9feac938b5 100644
--- a/deps/v8/src/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/arm64/full-codegen-arm64.cc
@@ -105,7 +105,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
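handler_table_ is now sized via HandlerTable::LengthForRange rather than one slot per handler. A sketch, under the assumption that a range-based table stores a fixed group of fields per try-range entry:

// Assumed layout: each try-range entry occupies a fixed group of slots
// (e.g. {range start, range end, handler offset, depth}), so the backing
// FixedArray length is the entry count scaled by that group size.
constexpr int kRangeEntrySize = 4;  // fields per entry (assumption)

int LengthForRange(int entries) { return entries * kRangeEntrySize; }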
@@ -196,7 +197,7 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in x1.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
- if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ if (info->scope()->is_script_scope()) {
__ Mov(x10, Operand(info->scope()->GetScopeInfo(info->isolate())));
__ Push(x1, x10);
__ CallRuntime(Runtime::kNewScriptContext, 2);
@@ -241,6 +242,11 @@ void FullCodeGenerator::Generate() {
}
}
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@@ -249,6 +255,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
+ if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
+ --num_parameters;
+ ++rest_index;
+ }
+
__ Add(x3, fp, StandardFrameConstants::kCallerSPOffset + offset);
__ Mov(x2, Smi::FromInt(num_parameters));
__ Mov(x1, Smi::FromInt(rest_index));
@@ -281,10 +292,6 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
@@ -456,10 +463,10 @@ void FullCodeGenerator::EmitReturnSequence() {
// Make sure that the constant pool is not emitted inside of the return
// sequence. This sequence can get patched when the debugger is used. See
- // debug-arm64.cc:BreakLocationIterator::SetDebugBreakAtReturn().
+ // debug-arm64.cc:BreakLocation::SetDebugBreakAtReturn().
{
InstructionAccurateScope scope(masm_,
- Assembler::kJSRetSequenceInstructions);
+ Assembler::kJSReturnSequenceInstructions);
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
// This code is generated using Assembler methods rather than Macro
@@ -1508,7 +1515,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ Mov(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
}
- CallLoadIC(CONTEXTUAL);
+ CallGlobalLoadIC(var->name());
context()->Plug(x0);
break;
}
@@ -2261,6 +2268,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
__ Push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+ // The static 'prototype' property is read-only. The non-computed property
+ // name case is handled in the parser. Since this is the only case where an
+ // own read-only property needs to be checked, we special-case it here so
+ // the check is not done for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ Push(x0);
+ }
+
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@@ -2398,23 +2415,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
CallStoreIC();
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ Mov(x1, Operand(var->name()));
- __ Push(x0, cp, x1);
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, x1);
- __ Ldr(x10, location);
- __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ Bind(&skip);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2430,6 +2430,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ Bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &const_error);
+ __ Mov(x10, Operand(var->name()));
+ __ Push(x10);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ Bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2453,8 +2467,31 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(var->mode() == CONST_LEGACY);
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ Mov(x1, Operand(var->name()));
+ __ Push(x0, cp, x1);
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, x1);
+ __ Ldr(x10, location);
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ Bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ }
+ // Silently ignore store in sloppy mode.
}
}
@@ -2586,7 +2623,12 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ Push(isolate()->factory()->undefined_value());
+ {
+ UseScratchRegisterScope temps(masm_);
+ Register temp = temps.AcquireX();
+ __ LoadRoot(temp, Heap::kUndefinedValueRootIndex);
+ __ Push(temp);
+ }
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2945,8 +2987,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- if (!ValidateSuperCall(expr)) return;
-
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
@@ -3467,9 +3507,10 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset));
- __ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE,
- &non_function_constructor);
+ Register instance_type = x14;
+ __ GetMapConstructor(x12, x10, x13, instance_type);
+ __ Cmp(instance_type, JS_FUNCTION_TYPE);
+ __ B(ne, &non_function_constructor);
// x12 now contains the constructor function. Grab the
// instance class name from there.
@@ -3764,7 +3805,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ B(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ Bind(&done);
context()->Plug(result);
@@ -3810,7 +3851,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ B(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ Bind(&done);
context()->Plug(result);
@@ -3985,7 +4026,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ Bind(&done);
context()->Plug(x0);
@@ -4254,18 +4295,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- if (expr->function() != NULL &&
- expr->function()->intrinsic_type == Runtime::INLINE) {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRunTime");
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRunTime");
// Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(LoadDescriptor::ReceiverRegister(),
@@ -4287,7 +4321,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Pop(x10);
__ Push(x0, x10);
- int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@@ -4302,15 +4335,29 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, x0);
+
} else {
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(x0);
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(x0);
+ }
+ }
}
}
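The FOR_EACH_FULL_CODE_INTRINSIC expansion above is the classic X-macro dispatch: one list macro stamps out a switch case per intrinsic. A minimal self-contained sketch of the pattern (the names are illustrative, not the real V8 intrinsic list):

#include <cstdio>

#define FOR_EACH_DEMO_INTRINSIC(V) \
  V(IsSmi)                         \
  V(ClassOf)

enum FunctionId { kIsSmi, kClassOf, kOther };

void EmitIsSmi() { std::puts("inline IsSmi"); }
void EmitClassOf() { std::puts("inline ClassOf"); }
void EmitGenericCall() { std::puts("generic runtime call"); }

// One switch case per list entry; adding an intrinsic to the list macro
// automatically routes it to its Emit##Name generator.
void Dispatch(FunctionId id) {
  switch (id) {
#define CASE(Name) case k##Name: return Emit##Name();
    FOR_EACH_DEMO_INTRINSIC(CASE)
#undef CASE
    default: return EmitGenericCall();
  }
}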
@@ -4980,7 +5027,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ Bind(&l_catch);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
__ Peek(x3, 1 * kPointerSize); // iter
__ Push(load_name, x3, x0); // "throw", iter, except
@@ -4991,8 +5037,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ Bind(&l_try);
__ Pop(x0); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
+ EnterTryBlock(expr->index(), &l_catch);
+ const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ Push(x0); // result
__ B(&l_suspend);
@@ -5003,9 +5049,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ B(&l_resume);
__ Bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
+ const int generator_object_depth = kPointerSize + try_block_size;
__ Peek(x0, generator_object_depth);
__ Push(x0); // g
+ __ Push(Smi::FromInt(expr->index())); // handler-index
DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
__ Mov(x1, Smi::FromInt(l_continuation.pos()));
__ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
@@ -5013,12 +5060,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Mov(x1, cp);
__ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Pop(x0); // result
EmitReturnSequence();
__ Bind(&l_resume); // received in x0
- __ PopTryHandler();
+ ExitTryBlock(expr->index());
// receiver = iter; f = 'next'; arg = received;
__ Bind(&l_next);
@@ -5288,20 +5335,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_pending_message_obj(isolate());
__ Mov(x10, pending_message_obj);
__ Ldr(x10, MemOperand(x10));
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
- __ Mov(x11, has_pending_message);
- __ Ldrb(x11, MemOperand(x11));
- __ SmiTag(x11);
-
- __ Push(x10, x11);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Mov(x10, pending_message_script);
- __ Ldr(x10, MemOperand(x10));
__ Push(x10);
}
@@ -5311,23 +5344,11 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(x10));
// Restore pending message from stack.
- __ Pop(x10, x11, x12);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Mov(x13, pending_message_script);
- __ Str(x10, MemOperand(x13));
-
- __ SmiUntag(x11);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Mov(x13, has_pending_message);
- STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
- __ Strb(x11, MemOperand(x13));
-
+ __ Pop(x10);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Mov(x13, pending_message_obj);
- __ Str(x12, MemOperand(x13));
+ __ Str(x10, MemOperand(x13));
// Restore result register and cooked return address from the stack.
__ Pop(x10, result_register());
@@ -5437,37 +5458,6 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
}
-#define __ ACCESS_MASM(masm())
-
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- ASM_LOCATION("FullCodeGenerator::TryFinally::Exit");
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ Peek(cp, StackHandlerConstants::kContextOffset);
- __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ Bl(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/src/arm64/instructions-arm64.cc b/deps/v8/src/arm64/instructions-arm64.cc
index 71094baa87..7a5effe427 100644
--- a/deps/v8/src/arm64/instructions-arm64.cc
+++ b/deps/v8/src/arm64/instructions-arm64.cc
@@ -191,6 +191,9 @@ int64_t Instruction::ImmPCOffset() {
// All PC-relative branches.
// Relative branch offsets are instruction-size-aligned.
offset = ImmBranch() << kInstructionSizeLog2;
+ } else if (IsUnresolvedInternalReference()) {
+ // Internal references are always word-aligned.
+ offset = ImmUnresolvedInternalReference() << kInstructionSizeLog2;
} else {
// Load literal (offset from PC).
DCHECK(IsLdrLiteral());
@@ -223,7 +226,10 @@ void Instruction::SetImmPCOffsetTarget(Instruction* target) {
SetPCRelImmTarget(target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
+ } else if (IsUnresolvedInternalReference()) {
+ SetUnresolvedInternalReferenceImmTarget(target);
} else {
+ // Load literal (offset from PC).
SetImmLLiteral(target);
}
}
@@ -278,7 +284,23 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
}
+void Instruction::SetUnresolvedInternalReferenceImmTarget(Instruction* target) {
+ DCHECK(IsUnresolvedInternalReference());
+ DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
+
+ ptrdiff_t target_offset = DistanceTo(target) >> kInstructionSizeLog2;
+ DCHECK(is_int32(target_offset));
+ uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
+ uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
+
+ PatchingAssembler patcher(this, 2);
+ patcher.brk(high16);
+ patcher.brk(low16);
+}
+
+
void Instruction::SetImmLLiteral(Instruction* source) {
+ DCHECK(IsLdrLiteral());
DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
Instr imm = Assembler::ImmLLiteral(offset);
diff --git a/deps/v8/src/arm64/instructions-arm64.h b/deps/v8/src/arm64/instructions-arm64.h
index 374e2464c3..142b7c11d4 100644
--- a/deps/v8/src/arm64/instructions-arm64.h
+++ b/deps/v8/src/arm64/instructions-arm64.h
@@ -121,10 +121,18 @@ class Instruction {
return InstructionBits() & mask;
}
+ V8_INLINE const Instruction* following(int count = 1) const {
+ return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
+ }
+
V8_INLINE Instruction* following(int count = 1) {
return InstructionAtOffset(count * static_cast<int>(kInstructionSize));
}
+ V8_INLINE const Instruction* preceding(int count = 1) const {
+ return following(-count);
+ }
+
V8_INLINE Instruction* preceding(int count = 1) {
return following(-count);
}
@@ -189,6 +197,14 @@ class Instruction {
return Mask(PCRelAddressingMask) == ADR;
}
+ bool IsBrk() const { return Mask(ExceptionMask) == BRK; }
+
+ bool IsUnresolvedInternalReference() const {
+ // Unresolved internal references are encoded as two consecutive brk
+ // instructions.
+ return IsBrk() && following()->IsBrk();
+ }
+
bool IsLogicalImmediate() const {
return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
}
@@ -306,6 +322,15 @@ class Instruction {
return 0;
}
+ int ImmUnresolvedInternalReference() const {
+ DCHECK(IsUnresolvedInternalReference());
+ // Unresolved references are encoded as two consecutive brk instructions.
+ // The associated immediate is made of the two 16-bit payloads.
+ int32_t high16 = ImmException();
+ int32_t low16 = following()->ImmException();
+ return (high16 << 16) | low16;
+ }
+
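ImmUnresolvedInternalReference above reassembles a 32-bit, instruction-granular offset from the 16-bit immediates carried by two consecutive brk instructions. The round trip, as a sketch:

#include <cstdint>

// Split a 32-bit offset into the two 16-bit brk payloads...
uint16_t High16(int32_t offset) {
  return static_cast<uint16_t>((offset >> 16) & 0xFFFF);
}
uint16_t Low16(int32_t offset) {
  return static_cast<uint16_t>(offset & 0xFFFF);
}

// ...and reassemble it, matching ImmUnresolvedInternalReference: negative
// offsets come back via the two's-complement reinterpretation.
int32_t Reassemble(uint16_t high16, uint16_t low16) {
  uint32_t bits = (static_cast<uint32_t>(high16) << 16) | low16;
  return static_cast<int32_t>(bits);
}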
bool IsBranchAndLinkToRegister() const {
return Mask(UnconditionalBranchToRegisterMask) == BLR;
}
@@ -349,6 +374,7 @@ class Instruction {
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(Instruction* target);
+ void SetUnresolvedInternalReferenceImmTarget(Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@@ -359,13 +385,18 @@ class Instruction {
enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
+ V8_INLINE const Instruction* InstructionAtOffset(
+ int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) const {
+ // The FUZZ_disasm test relies on no check being done.
+ DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
+ return this + offset;
+ }
+
V8_INLINE Instruction* InstructionAtOffset(
- int64_t offset,
- CheckAlignment check = CHECK_ALIGNMENT) {
- Address addr = reinterpret_cast<Address>(this) + offset;
+ int64_t offset, CheckAlignment check = CHECK_ALIGNMENT) {
// The FUZZ_disasm test relies on no check being done.
- DCHECK(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
- return Cast(addr);
+ DCHECK(check == NO_CHECK || IsAligned(offset, kInstructionSize));
+ return this + offset;
}
template<typename T> V8_INLINE static Instruction* Cast(T src) {
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index 6deeabfcf5..773dec4bca 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -261,6 +261,15 @@ void InternalArrayConstructorDescriptor::Initialize(
}
+void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ // cp: context
+ // x1: left operand
+ // x0: right operand
+ Register registers[] = {cp, x1, x0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// cp: context
// x0: value to compare
diff --git a/deps/v8/src/arm64/lithium-arm64.cc b/deps/v8/src/arm64/lithium-arm64.cc
index 0234fcddca..4a89fc4344 100644
--- a/deps/v8/src/arm64/lithium-arm64.cc
+++ b/deps/v8/src/arm64/lithium-arm64.cc
@@ -1692,14 +1692,6 @@ LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
}
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell();
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object =
@@ -2351,18 +2343,6 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- if (instr->RequiresHoleCheck()) {
- return AssignEnvironment(new(zone()) LStoreGlobalCell(value,
- TempRegister(),
- TempRegister()));
- } else {
- return new(zone()) LStoreGlobalCell(value, TempRegister(), NULL);
- }
-}
-
-
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LOperand* key = UseRegisterOrConstant(instr->key());
LOperand* temp = NULL;
diff --git a/deps/v8/src/arm64/lithium-arm64.h b/deps/v8/src/arm64/lithium-arm64.h
index 8b48729302..0549689e79 100644
--- a/deps/v8/src/arm64/lithium-arm64.h
+++ b/deps/v8/src/arm64/lithium-arm64.h
@@ -102,7 +102,6 @@ class LCodeGen;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedExternal) \
V(LoadKeyedFixed) \
@@ -151,7 +150,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyedExternal) \
V(StoreKeyedFixed) \
V(StoreKeyedFixedDouble) \
@@ -1731,13 +1729,6 @@ class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
};
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@@ -2809,23 +2800,6 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
};
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 2> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
public:
LSubI(LOperand* left, LOperand* right)
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.cc b/deps/v8/src/arm64/lithium-codegen-arm64.cc
index ef01c91d47..cd331db249 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -434,7 +435,6 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
CallFunctionStub stub(isolate(), arity, flags);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
- after_push_argument_ = false;
}
@@ -449,7 +449,6 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
- after_push_argument_ = false;
DCHECK(ToRegister(instr->result()).is(x0));
}
@@ -497,7 +496,6 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
- after_push_argument_ = false;
DCHECK(ToRegister(instr->result()).is(x0));
}
@@ -519,7 +517,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
__ Mov(cp, ToRegister(context));
} else if (context->IsStackSlot()) {
- __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
+ __ Ldr(cp, ToMemOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@@ -662,7 +660,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
+ if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kXRegSize;
@@ -841,7 +839,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateJumpTable() {
- Label needs_frame, restore_caller_doubles, call_deopt_entry;
+ Label needs_frame, call_deopt_entry;
if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
@@ -863,55 +861,52 @@ bool LCodeGen::GenerateJumpTable() {
// address and add an immediate offset.
__ Mov(entry_offset, entry - base);
- // The last entry can fall through into `call_deopt_entry`, avoiding a
- // branch.
- bool last_entry = (i + 1) == length;
-
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
- if (!needs_frame.is_bound()) {
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
-
- UseScratchRegisterScope temps(masm());
- Register stub_marker = temps.AcquireX();
- __ Bind(&needs_frame);
- __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
- __ Push(lr, fp, cp, stub_marker);
- __ Add(fp, __ StackPointer(), 2 * kPointerSize);
- if (!last_entry) __ B(&call_deopt_entry);
- } else {
- // Reuse the existing needs_frame code.
- __ B(&needs_frame);
- }
- } else if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- if (!restore_caller_doubles.is_bound()) {
- __ Bind(&restore_caller_doubles);
- RestoreCallerDoubles();
- if (!last_entry) __ B(&call_deopt_entry);
- } else {
- // Reuse the existing restore_caller_doubles code.
- __ B(&restore_caller_doubles);
- }
+ Comment(";;; call deopt with frame");
+ // Save lr before Bl; fp will be adjusted in the needs_frame code.
+ __ Push(lr, fp);
+ // Reuse the existing needs_frame code.
+ __ Bl(&needs_frame);
} else {
// There is nothing special to do, so just continue to the second-level
// table.
- if (!last_entry) __ B(&call_deopt_entry);
+ __ Bl(&call_deopt_entry);
}
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
- masm()->CheckConstPool(false, last_entry);
+ masm()->CheckConstPool(false, false);
+ }
+
+ if (needs_frame.is_linked()) {
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+
+ Comment(";;; needs_frame common code");
+ UseScratchRegisterScope temps(masm());
+ Register stub_marker = temps.AcquireX();
+ __ Bind(&needs_frame);
+ __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+ __ Push(cp, stub_marker);
+ __ Add(fp, __ StackPointer(), 2 * kPointerSize);
}
// Generate common code for calling the second-level deopt table.
- Register deopt_entry = temps.AcquireX();
__ Bind(&call_deopt_entry);
+
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+
+ Register deopt_entry = temps.AcquireX();
__ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
RelocInfo::RUNTIME_ENTRY));
__ Add(deopt_entry, deopt_entry, entry_offset);
- __ Call(deopt_entry);
+ __ Br(deopt_entry);
}
// Force constant pool emission at the end of the deopt jump table to make
@@ -1057,14 +1052,15 @@ void LCodeGen::DeoptimizeBranch(
__ Bind(&dont_trap);
}
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
frame_is_built_ && !info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry* table_entry =
new (zone()) Deoptimizer::JumpTableEntry(
@@ -1151,7 +1147,7 @@ void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}
@@ -1276,38 +1272,13 @@ static int64_t ArgumentsOffsetWithoutFrame(int index) {
}
-MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
DCHECK(op != NULL);
DCHECK(!op->IsRegister());
DCHECK(!op->IsDoubleRegister());
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
- int fp_offset = StackSlotOffset(op->index());
- if (op->index() >= 0) {
- // Loads and stores have a bigger reach in positive offset than negative.
- // When the load or the store can't be done in one instruction via fp
- // (too big negative offset), we try to access via jssp (positive offset).
- // We can reference a stack slot from jssp only if jssp references the end
- // of the stack slots. It's not the case when:
- // - stack_mode != kCanUseStackPointer: this is the case when a deferred
- // code saved the registers.
- // - after_push_argument_: arguments has been pushed for a call.
- // - inlined_arguments_: inlined arguments have been pushed once. All the
- // remainder of the function cannot trust jssp any longer.
- // - saves_caller_doubles: some double registers have been pushed, jssp
- // references the end of the double registers and not the end of the
- // stack slots.
- // Also, if the offset from fp is small enough to make a load/store in
- // one instruction, we use a fp access.
- if ((stack_mode == kCanUseStackPointer) && !after_push_argument_ &&
- !inlined_arguments_ && !is_int9(fp_offset) &&
- !info()->saves_caller_doubles()) {
- int jssp_offset =
- (GetStackSlotCount() - op->index() - 1) * kPointerSize;
- return MemOperand(masm()->StackPointer(), jssp_offset);
- }
- }
- return MemOperand(fp, fp_offset);
+ return MemOperand(fp, StackSlotOffset(op->index()));
} else {
// Retrieve parameter without an eager stack frame, relative to the
// stack pointer.
@@ -1711,10 +1682,6 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
- // We push some arguments and they will be pop in an other block. We can't
- // trust that jssp references the end of the stack slots until the end of
- // the function.
- inlined_arguments_ = true;
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
@@ -2131,8 +2098,6 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
}
generator.AfterCall();
}
-
- after_push_argument_ = false;
}
@@ -2152,13 +2117,11 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
__ Call(x10);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
- after_push_argument_ = false;
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
CallRuntime(instr->function(), instr->arity(), instr);
- after_push_argument_ = false;
}
@@ -2184,7 +2147,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
default:
UNREACHABLE();
}
- after_push_argument_ = false;
}
@@ -2437,15 +2399,17 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
-
+ {
+ UseScratchRegisterScope temps(masm());
+ Register instance_type = temps.AcquireX();
+ __ GetMapConstructor(scratch1, map, scratch2, instance_type);
+ __ Cmp(instance_type, JS_FUNCTION_TYPE);
+ }
// Objects with a non-function constructor have class 'Object'.
if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ JumpIfNotObjectType(
- scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
+ __ B(ne, true_label);
} else {
- __ JumpIfNotObjectType(
- scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, false_label);
+ __ B(ne, false_label);
}
// The constructor function is in scratch1. Get its instance class name.
@@ -2664,7 +2628,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
UseScratchRegisterScope temps(masm());
Register temp = temps.AcquireX();
Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ Mov(temp, Operand(Handle<Object>(cell)));
+ __ Mov(temp, Operand(cell));
__ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
__ Cmp(reg, temp);
} else {
@@ -3139,8 +3103,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
__ bind(&map_check);
// Will be patched with the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ ldr(scratch, Immediate(Handle<Object>(cell)));
- __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ ldr(scratch, Immediate(cell));
+ __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
__ cmp(map, scratch);
__ b(&cache_miss, ne);
// The address of this instruction is computed relative to the map check
@@ -3238,7 +3202,6 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
instr->hydrogen()->formal_parameter_count(),
instr->arity(), instr);
}
- after_push_argument_ = false;
}
@@ -3402,17 +3365,6 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
}
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
- __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
- }
-}
-
-
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@@ -3441,7 +3393,8 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3693,7 +3646,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).Is(x0));
@@ -3750,8 +3705,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL,
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
DCHECK(ToRegister(instr->result()).is(x0));
@@ -4787,8 +4743,6 @@ void LCodeGen::DoPushArguments(LPushArguments* instr) {
// The preamble was done by LPreparePushArguments.
args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
-
- after_push_argument_ = true;
}
@@ -5185,30 +5139,6 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
}
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = ToRegister(instr->temp1());
-
- // Load the cell.
- __ Mov(cell, Operand(instr->hydrogen()->cell().handle()));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- Register payload = ToRegister(instr->temp2());
- __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
- }
-
- // Store the value.
- __ Str(value, FieldMemOperand(cell, Cell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-}
-
-
void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
Register ext_ptr = ToRegister(instr->elements());
Register key = no_reg;
@@ -5381,8 +5311,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5492,7 +5423,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
+ Handle<Code> ic =
+ StoreIC::initialize_stub(isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
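
For orientation, the jump-table rework above gives each deopt table entry the shape outlined below; this is a reading of the emitted code, not a verbatim excerpt:

// Per entry:
//   needs frame:  Push(lr, fp); Bl(&needs_frame);   // frame built by shared code
//   otherwise:    Bl(&call_deopt_entry);            // straight to the shared tail
// Shared needs_frame code (stub frames only):
//   push cp plus a Smi-tagged StackFrame::STUB marker, then point fp at the frame
// Shared call_deopt_entry tail:
//   RestoreCallerDoubles() when the stub saved them, then
//   deopt_entry = base + entry_offset; Br(deopt_entry);  // a branch, not a Call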
diff --git a/deps/v8/src/arm64/lithium-codegen-arm64.h b/deps/v8/src/arm64/lithium-codegen-arm64.h
index fe16a4e59c..d94262e74d 100644
--- a/deps/v8/src/arm64/lithium-codegen-arm64.h
+++ b/deps/v8/src/arm64/lithium-codegen-arm64.h
@@ -37,16 +37,10 @@ class LCodeGen: public LCodeGenBase {
frame_is_built_(false),
safepoints_(info->zone()),
resolver_(this),
- expected_safepoint_kind_(Safepoint::kSimple),
- after_push_argument_(false),
- inlined_arguments_(false) {
+ expected_safepoint_kind_(Safepoint::kSimple) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
- ~LCodeGen() {
- DCHECK(!after_push_argument_ || inlined_arguments_);
- }
-
// Simple accessors.
Scope* scope() const { return scope_; }
@@ -87,9 +81,7 @@ class LCodeGen: public LCodeGenBase {
Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op);
Operand ToOperand32(LOperand* op);
- enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
- MemOperand ToMemOperand(LOperand* op,
- StackMode stack_mode = kCanUseStackPointer) const;
+ MemOperand ToMemOperand(LOperand* op) const;
Handle<Object> ToHandle(LConstantOperand* op) const;
template <class LI>
@@ -366,15 +358,6 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_;
- // This flag is true when we are after a push (but before a call).
- // In this situation, jssp no longer references the end of the stack slots so,
- // we can only reference a stack slot via fp.
- bool after_push_argument_;
- // If we have inlined arguments, we are no longer able to use jssp because
- // jssp is modified and we never know if we are in a block after or before
- // the pop of the arguments (which restores jssp).
- bool inlined_arguments_;
-
int old_position_;
class PushSafepointRegistersScope BASE_EMBEDDED {
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 8b559755a0..5df2e5a46d 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -1403,6 +1403,7 @@ void MacroAssembler::LoadRoot(CPURegister destination,
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Str(source, MemOperand(root, index << kPointerSizeLog2));
}
@@ -1549,27 +1550,6 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
}
-void MacroAssembler::JumpToHandlerEntry(Register exception,
- Register object,
- Register state,
- Register scratch1,
- Register scratch2) {
- // Handler expects argument in x0.
- DCHECK(exception.Is(x0));
-
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
- Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
- STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
- Lsr(scratch2, state, StackHandler::kKindWidth);
- Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
- Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
- Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
- Br(scratch1);
-}
-
-
void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
@@ -1582,95 +1562,6 @@ void MacroAssembler::InNewSpace(Register object,
}
-void MacroAssembler::Throw(Register value,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The handler expects the exception in x0.
- DCHECK(value.Is(x0));
-
- // Drop the stack pointer to the top of the top handler.
- DCHECK(jssp.Is(StackPointer()));
- Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
- isolate())));
- Ldr(jssp, MemOperand(scratch1));
- // Restore the next handler.
- Pop(scratch2);
- Str(scratch2, MemOperand(scratch1));
-
- // Get the code object and state. Restore the context and frame pointer.
- Register object = scratch1;
- Register state = scratch2;
- Pop(object, state, cp, fp);
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
- // or cp.
- Label not_js_frame;
- Cbz(cp, &not_js_frame);
- Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- Bind(&not_js_frame);
-
- JumpToHandlerEntry(value, object, state, scratch3, scratch4);
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The handler expects the exception in x0.
- DCHECK(value.Is(x0));
-
- // Drop the stack pointer to the top of the top stack handler.
- DCHECK(jssp.Is(StackPointer()));
- Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
- isolate())));
- Ldr(jssp, MemOperand(scratch1));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label fetch_next, check_kind;
- B(&check_kind);
- Bind(&fetch_next);
- Peek(jssp, StackHandlerConstants::kNextOffset);
-
- Bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- Peek(scratch2, StackHandlerConstants::kStateOffset);
- TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- Pop(scratch2);
- Str(scratch2, MemOperand(scratch1));
-
- // Get the code object and state. Clear the context and frame pointer (0 was
- // saved in the handler).
- Register object = scratch1;
- Register state = scratch2;
- Pop(object, state, cp, fp);
-
- JumpToHandlerEntry(value, object, state, scratch3, scratch4);
-}
-
-
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@@ -3147,46 +3038,26 @@ void MacroAssembler::DebugBreak() {
}
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
+void MacroAssembler::PushStackHandler() {
DCHECK(jssp.Is(StackPointer()));
// Adjust this code if the asserts don't hold.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// For the JSEntry handler, we must preserve the live registers x0-x4.
// (See JSEntryStub::GenerateBody().)
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
-
- // Set up the code object and the state for pushing.
- Mov(x10, Operand(CodeObject()));
- Mov(x11, state);
-
- // Push the frame pointer, context, state, and code object.
- if (kind == StackHandler::JS_ENTRY) {
- DCHECK(Smi::FromInt(0) == 0);
- Push(xzr, xzr, x11, x10);
- } else {
- Push(fp, cp, x11, x10);
- }
-
// Link the current handler as the next handler.
Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
Ldr(x10, MemOperand(x11));
Push(x10);
+
// Set this new handler as the current one.
Str(jssp, MemOperand(x11));
}
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
Pop(x10);
Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
@@ -3705,6 +3576,20 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
}
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp, Register temp2) {
+ Label done, loop;
+ Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
+ Bind(&loop);
+ JumpIfSmi(result, &done);
+ CompareObjectType(result, temp, temp2, MAP_TYPE);
+ B(ne, &done);
+ Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
+ B(&loop);
+ Bind(&done);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -3756,7 +3641,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: fetch prototype from constructor field in initial
// map.
Bind(&non_instance);
- Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ GetMapConstructor(result, result, scratch, scratch);
}
// All done.
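
The GetMapConstructor helper added above replaces direct Map::kConstructorOffset loads now that the field doubles as a back pointer. A minimal C++-level sketch of what the assembly loop computes; constructor_or_back_pointer() is an assumed accessor name, not quoted from this diff:

// Follow the constructor-or-back-pointer field until it no longer holds a
// Map; intermediate Maps are back pointers left behind by map transitions.
Object* GetConstructorSlow(Map* map) {
  Object* result = map->constructor_or_back_pointer();
  // Mirrors JumpIfSmi + CompareObjectType(..., MAP_TYPE) in the assembly.
  while (!result->IsSmi() && result->IsMap()) {
    result = Map::cast(result)->constructor_or_back_pointer();
  }
  return result;  // whatever ends the chain, e.g. the constructor function
}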
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index 3a0df0b049..3937f6f9bb 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -1078,22 +1078,6 @@ class MacroAssembler : public Assembler {
// This is required for compatibility in architecture independent code.
inline void jmp(Label* L) { B(L); }
- // Passes thrown value to the handler of top of the try handler chain.
- // Register value must be x0.
- void Throw(Register value,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain. Register value must be x0.
- void ThrowUncatchable(Register value,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
void TailCallStub(CodeStub* stub);
@@ -1289,12 +1273,12 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
+ // Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
- void PopTryHandler();
+ void PopStackHandler();
// ---------------------------------------------------------------------------
@@ -1378,6 +1362,11 @@ class MacroAssembler : public Assembler {
kDontMissOnBoundFunction
};
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done, and |temp2| its instance type.
+ void GetMapConstructor(Register result, Register map, Register temp,
+ Register temp2);
+
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -2070,14 +2059,6 @@ class MacroAssembler : public Assembler {
// have mixed types. The format string (x0) should not be included.
void CallPrintf(int arg_count = 0, const CPURegister * args = NULL);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry(Register exception,
- Register object,
- Register state,
- Register scratch1,
- Register scratch2);
-
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Condition cond, // eq for new space, ne otherwise.
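
The PushStackHandler/PopStackHandler pair above reflects a much smaller handler record: the adjusted STATIC_ASSERTs pin StackHandlerConstants::kSize to a single pointer and kNextOffset to zero, so a handler is now just a link. A conceptual layout consistent with those asserts (inferred from this diff, not a copied declaration):

// kSize == 1 * kPointerSize, kNextOffset == 0
struct StackHandler {
  Address next;  // link to the previous handler; code/state/context/fp are gone
};

Pushing thus reduces to loading Isolate::kHandlerAddress, pushing the old top handler, and storing jssp back as the new top, exactly as the rewritten body shows.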
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 784f51f0e4..12d2b4c22a 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -240,7 +240,7 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new InternalArray(
// Clamp array length to 2^32-1 to avoid early RangeError.
- MathMin(len - del_count + num_additional_args, 0xffffffff));
+ $min(len - del_count + num_additional_args, 0xffffffff));
var big_indices;
var indices = %GetArrayKeys(array, len);
if (IS_NUMBER(indices)) {
diff --git a/deps/v8/src/arraybuffer.js b/deps/v8/src/arraybuffer.js
index cf00693be7..cbb51433bb 100644
--- a/deps/v8/src/arraybuffer.js
+++ b/deps/v8/src/arraybuffer.js
@@ -39,16 +39,16 @@ function ArrayBufferSlice(start, end) {
var first;
var byte_length = %_ArrayBufferGetByteLength(this);
if (relativeStart < 0) {
- first = MathMax(byte_length + relativeStart, 0);
+ first = $max(byte_length + relativeStart, 0);
} else {
- first = MathMin(relativeStart, byte_length);
+ first = $min(relativeStart, byte_length);
}
var relativeEnd = IS_UNDEFINED(end) ? byte_length : end;
var fin;
if (relativeEnd < 0) {
- fin = MathMax(byte_length + relativeEnd, 0);
+ fin = $max(byte_length + relativeEnd, 0);
} else {
- fin = MathMin(relativeEnd, byte_length);
+ fin = $min(relativeEnd, byte_length);
}
if (fin < first) {
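
The $min/$max rewrites above leave ArrayBufferSlice's clamping intact. Worked through for byte_length 10, start -3, end undefined: first = $max(10 + (-3), 0) = 7 and fin = $min(10, 10) = 10, so the slice covers bytes [7, 10).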
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index f4299ed717..ffee104619 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -54,7 +54,7 @@
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
#include "src/runtime/runtime.h"
-#include "src/serialize.h"
+#include "src/snapshot/serialize.h"
#include "src/token.h"
#if V8_TARGET_ARCH_IA32
@@ -292,11 +292,6 @@ int Label::pos() const {
// (Bits 6..31 of pc delta, with leading zeroes
// dropped, and last non-zero chunk tagged with 1.)
-
-#ifdef DEBUG
-const int kMaxStandardNonCompactModes = 14;
-#endif
-
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kExtraTagBits = 4;
@@ -452,8 +447,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#endif
DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
DCHECK(rinfo->pc() - last_pc_ >= 0);
- DCHECK(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
- <= kMaxStandardNonCompactModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
@@ -465,7 +458,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
// Use signed delta-encoding for id.
- DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
+ DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
int id_delta = static_cast<int>(rinfo->data()) - last_id_;
// Check if delta is small enough to fit in a tagged byte.
if (is_intn(id_delta, kSmallDataBits)) {
@@ -483,12 +476,12 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteTaggedData(rinfo->data(), kDeoptReasonTag);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for position.
- DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
+ DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
if (rmode == RelocInfo::STATEMENT_POSITION) {
WritePosition(pc_delta, pos_delta, rmode);
} else {
- DCHECK(rmode == RelocInfo::POSITION);
+ DCHECK_EQ(rmode, RelocInfo::POSITION);
if (pc_delta != 0 || last_mode_ != RelocInfo::POSITION) {
FlushPosition();
next_position_candidate_pc_delta_ = pc_delta;
@@ -511,10 +504,14 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
: kVeneerPoolTag);
} else {
DCHECK(rmode > RelocInfo::LAST_COMPACT_ENUM);
- int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
+ DCHECK(rmode <= RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM);
+ STATIC_ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM -
+ RelocInfo::LAST_COMPACT_ENUM <=
+ kPoolExtraTag);
+ int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM - 1;
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
- DCHECK(saved_mode < kPoolExtraTag);
+ DCHECK(0 <= saved_mode && saved_mode < kPoolExtraTag);
WriteExtraTaggedPC(pc_delta, saved_mode);
}
last_pc_ = rinfo->pc();
@@ -721,7 +718,7 @@ void RelocIterator::next() {
Advance(kIntSize);
} else {
AdvanceReadPC();
- int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
+ int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM + 1;
if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
}
}
@@ -832,6 +829,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "external reference";
case RelocInfo::INTERNAL_REFERENCE:
return "internal reference";
+ case RelocInfo::INTERNAL_REFERENCE_ENCODED:
+ return "encoded internal reference";
case RelocInfo::DEOPT_REASON:
return "deopt reason";
case RelocInfo::CONST_POOL:
@@ -861,8 +860,10 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
os << " (" << Brief(target_object()) << ")";
} else if (rmode_ == EXTERNAL_REFERENCE) {
ExternalReferenceEncoder ref_encoder(isolate);
- os << " (" << ref_encoder.NameOfAddress(target_reference()) << ") ("
- << static_cast<const void*>(target_reference()) << ")";
+ os << " ("
+ << ref_encoder.NameOfAddress(isolate, target_external_reference())
+ << ") (" << static_cast<const void*>(target_external_reference())
+ << ")";
} else if (IsCodeTarget(rmode_)) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
os << " (" << Code::Kind2String(code->kind()) << ") ("
@@ -910,13 +911,21 @@ void RelocInfo::Verify(Isolate* isolate) {
CHECK(code->address() == HeapObject::cast(found)->address());
break;
}
+ case INTERNAL_REFERENCE:
+ case INTERNAL_REFERENCE_ENCODED: {
+ Address target = target_internal_reference();
+ Address pc = target_internal_reference_address();
+ Code* code = Code::cast(isolate->FindCodeObject(pc));
+ CHECK(target >= code->instruction_start());
+ CHECK(target <= code->instruction_end());
+ break;
+ }
case RUNTIME_ENTRY:
case JS_RETURN:
case COMMENT:
case POSITION:
case STATEMENT_POSITION:
case EXTERNAL_REFERENCE:
- case INTERNAL_REFERENCE:
case DEOPT_REASON:
case CONST_POOL:
case VENEER_POOL:
@@ -1223,8 +1232,7 @@ ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
ExternalReference ExternalReference::old_data_space_allocation_top_address(
Isolate* isolate) {
- return ExternalReference(
- isolate->heap()->OldDataSpaceAllocationTopAddress());
+ return ExternalReference(isolate->heap()->OldDataSpaceAllocationTopAddress());
}
@@ -1265,18 +1273,6 @@ ExternalReference ExternalReference::address_of_pending_message_obj(
}
-ExternalReference ExternalReference::address_of_has_pending_message(
- Isolate* isolate) {
- return ExternalReference(isolate->has_pending_message_address());
-}
-
-
-ExternalReference ExternalReference::address_of_pending_message_script(
- Isolate* isolate) {
- return ExternalReference(isolate->pending_message_script_address());
-}
-
-
ExternalReference ExternalReference::address_of_min_int() {
return ExternalReference(reinterpret_cast<void*>(&double_constants.min_int));
}
@@ -1656,9 +1652,11 @@ bool PositionsRecorder::WriteRecordedPositions() {
// Platform specific but identical code for all the platforms.
-void Assembler::RecordDeoptReason(const int reason, const int raw_position) {
+void Assembler::RecordDeoptReason(const int reason,
+ const SourcePosition position) {
if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling()) {
EnsureSpace ensure_space(this);
+ int raw_position = position.IsUnknown() ? 0 : position.raw();
RecordRelocInfo(RelocInfo::POSITION, raw_position);
RecordRelocInfo(RelocInfo::DEOPT_REASON, reason);
}
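
The rebased extra-tag encoding earlier in this file shifts non-compact modes down by one, reclaiming tag value zero and making room for the new INTERNAL_REFERENCE_ENCODED mode; writer and reader apply opposite offsets. A round-trip sketch restating both sides with illustrative names:

// Write side (RelocInfoWriter::Write):
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM - 1;
// Read side (RelocIterator::next):
int decoded = saved_mode + RelocInfo::LAST_COMPACT_ENUM + 1;  // == rmode again
// Holds for every mode m with LAST_COMPACT_ENUM < m <= LAST_STANDARD_NONCOMPACT_ENUM,
// and the new STATIC_ASSERT keeps saved_mode below kPoolExtraTag.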
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index eb00f8a7e6..ab37cd9dab 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -379,6 +379,9 @@ class RelocInfo {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
+ // Encoded internal reference, used only on MIPS, MIPS64 and PPC.
+ INTERNAL_REFERENCE_ENCODED,
+
// Marks constant and veneer pools. Only used on ARM and ARM64.
// They use a custom noncompact encoding.
CONST_POOL,
@@ -394,10 +397,6 @@ class RelocInfo {
CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explicitly by
// code aging.
- // Encoded internal reference, used only on MIPS and MIPS64.
- // Re-uses previous ARM-only encoding, to fit in RealRelocMode space.
- INTERNAL_REFERENCE_ENCODED = CONST_POOL,
-
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
@@ -406,7 +405,7 @@ class RelocInfo {
LAST_GCED_ENUM = CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID,
- LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE
+ LAST_STANDARD_NONCOMPACT_ENUM = INTERNAL_REFERENCE_ENCODED
};
RelocInfo() {}
@@ -476,6 +475,9 @@ class RelocInfo {
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
+ static inline bool IsDebuggerStatement(Mode mode) {
+ return mode == DEBUG_BREAK;
+ }
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
@@ -575,9 +577,17 @@ class RelocInfo {
// place, ready to be patched with the target.
INLINE(int target_address_size());
- // Read/modify the reference in the instruction this relocation
- // applies to; can only be called if rmode_ is external_reference
- INLINE(Address target_reference());
+ // Read the reference in the instruction this relocation
+ // applies to; can only be called if rmode_ is EXTERNAL_REFERENCE.
+ INLINE(Address target_external_reference());
+
+ // Read the reference in the instruction this relocation
+ // applies to; can only be called if rmode_ is INTERNAL_REFERENCE.
+ INLINE(Address target_internal_reference());
+
+ // Return the reference address this relocation applies to;
+ // can only be called if rmode_ is INTERNAL_REFERENCE.
+ INLINE(Address target_internal_reference_address());
// Read/modify the address of a call instruction. This is used to relocate
// the break points where straight-line code is patched with a call
@@ -595,9 +605,6 @@ class RelocInfo {
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(Isolate* isolate, ObjectVisitor* v);
- // Patch the code with some other code.
- void PatchCode(byte* instructions, int instruction_count);
-
// Patch the code with a call.
void PatchCodeWithCall(Address target, int guard_bytes);
@@ -951,8 +958,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference scheduled_exception_address(Isolate* isolate);
static ExternalReference address_of_pending_message_obj(Isolate* isolate);
- static ExternalReference address_of_has_pending_message(Isolate* isolate);
- static ExternalReference address_of_pending_message_script(Isolate* isolate);
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
diff --git a/deps/v8/src/ast-numbering.cc b/deps/v8/src/ast-numbering.cc
index ea25ad8bbc..f63919c058 100644
--- a/deps/v8/src/ast-numbering.cc
+++ b/deps/v8/src/ast-numbering.cc
@@ -6,7 +6,6 @@
#include "src/ast.h"
#include "src/ast-numbering.h"
-#include "src/compiler.h"
#include "src/scopes.h"
namespace v8 {
@@ -18,6 +17,8 @@ class AstNumberingVisitor FINAL : public AstVisitor {
explicit AstNumberingVisitor(Isolate* isolate, Zone* zone)
: AstVisitor(),
next_id_(BailoutId::FirstUsable().ToInt()),
+ properties_(zone),
+ ic_slot_cache_(FLAG_vector_ics ? 4 : 0),
dont_optimize_reason_(kNoReason) {
InitializeAstVisitor(isolate, zone);
}
@@ -60,14 +61,15 @@ class AstNumberingVisitor FINAL : public AstVisitor {
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
FeedbackVectorRequirements reqs =
- node->ComputeFeedbackRequirements(isolate());
+ node->ComputeFeedbackRequirements(isolate(), &ic_slot_cache_);
if (reqs.slots() > 0) {
node->SetFirstFeedbackSlot(FeedbackVectorSlot(properties_.slots()));
properties_.increase_slots(reqs.slots());
}
if (reqs.ic_slots() > 0) {
int ic_slots = properties_.ic_slots();
- node->SetFirstFeedbackICSlot(FeedbackVectorICSlot(ic_slots));
+ node->SetFirstFeedbackICSlot(FeedbackVectorICSlot(ic_slots),
+ &ic_slot_cache_);
properties_.increase_ic_slots(reqs.ic_slots());
if (FLAG_vector_ics) {
for (int i = 0; i < reqs.ic_slots(); i++) {
@@ -81,6 +83,9 @@ class AstNumberingVisitor FINAL : public AstVisitor {
int next_id_;
AstProperties properties_;
+ // The slot cache allows us to reuse certain vector IC slots. It's only used
+ // if FLAG_vector_ics is true.
+ ICSlotCache ic_slot_cache_;
BailoutReason dont_optimize_reason_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -186,7 +191,6 @@ void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
IncrementNodeCount();
DisableOptimization(kImportDeclaration);
VisitVariableProxy(node->proxy());
- Visit(node->module());
}
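
With the cache threaded through, ReserveFeedbackSlots above proceeds per node roughly as follows (an outline of the template body, comments only):

// reqs = node->ComputeFeedbackRequirements(isolate(), &ic_slot_cache_);
// if (reqs.slots() > 0): hand the node the next plain slots, bump properties_.slots
// if (reqs.ic_slots() > 0): hand it the next IC slots via SetFirstFeedbackICSlot
//   (which may record them in ic_slot_cache_), bump properties_.ic_slots, and,
//   under FLAG_vector_ics, record each slot's Code::Kind in the spec.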
diff --git a/deps/v8/src/ast-value-factory.h b/deps/v8/src/ast-value-factory.h
index fe333254bb..b34a55b2a6 100644
--- a/deps/v8/src/ast-value-factory.h
+++ b/deps/v8/src/ast-value-factory.h
@@ -230,37 +230,41 @@ class AstValue : public ZoneObject {
// For generating constants.
-#define STRING_CONSTANTS(F) \
- F(anonymous_function, "(anonymous function)") \
- F(arguments, "arguments") \
- F(constructor, "constructor") \
- F(done, "done") \
- F(dot, ".") \
- F(dot_for, ".for") \
- F(dot_generator, ".generator") \
- F(dot_generator_object, ".generator_object") \
- F(dot_iterator, ".iterator") \
- F(dot_module, ".module") \
- F(dot_result, ".result") \
- F(empty, "") \
- F(eval, "eval") \
- F(get_template_callsite, "GetTemplateCallSite") \
- F(initialize_const_global, "initializeConstGlobal") \
- F(initialize_var_global, "initializeVarGlobal") \
- F(is_construct_call, "_IsConstructCall") \
- F(let, "let") \
- F(make_reference_error, "MakeReferenceErrorEmbedded") \
- F(make_syntax_error, "MakeSyntaxErrorEmbedded") \
- F(make_type_error, "MakeTypeErrorEmbedded") \
- F(native, "native") \
- F(new_target, "new.target") \
- F(next, "next") \
- F(proto, "__proto__") \
- F(prototype, "prototype") \
- F(this, "this") \
- F(use_asm, "use asm") \
- F(use_strong, "use strong") \
- F(use_strict, "use strict") \
+#define STRING_CONSTANTS(F) \
+ F(anonymous_function, "(anonymous function)") \
+ F(arguments, "arguments") \
+ F(constructor, "constructor") \
+ F(default, "default") \
+ F(done, "done") \
+ F(dot, ".") \
+ F(dot_for, ".for") \
+ F(dot_generator, ".generator") \
+ F(dot_generator_object, ".generator_object") \
+ F(dot_iterator, ".iterator") \
+ F(dot_module, ".module") \
+ F(dot_result, ".result") \
+ F(empty, "") \
+ F(eval, "eval") \
+ F(get_template_callsite, "GetTemplateCallSite") \
+ F(initialize_const_global, "initializeConstGlobal") \
+ F(initialize_var_global, "initializeVarGlobal") \
+ F(is_construct_call, "_IsConstructCall") \
+ F(is_spec_object, "_IsSpecObject") \
+ F(let, "let") \
+ F(make_reference_error, "MakeReferenceErrorEmbedded") \
+ F(make_syntax_error, "MakeSyntaxErrorEmbedded") \
+ F(make_type_error, "MakeTypeErrorEmbedded") \
+ F(native, "native") \
+ F(new_target, "new.target") \
+ F(next, "next") \
+ F(proto, "__proto__") \
+ F(prototype, "prototype") \
+ F(this, "this") \
+ F(throw_iterator_result_not_an_object, "ThrowIteratorResultNotAnObject") \
+ F(to_string, "ToString") \
+ F(use_asm, "use asm") \
+ F(use_strong, "use strong") \
+ F(use_strict, "use strict") \
F(value, "value")
#define OTHER_CONSTANTS(F) \
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 7a7e3d37fc..8caf9c2a29 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -59,24 +59,29 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
}
-VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
- : Expression(zone, position),
+VariableProxy::VariableProxy(Zone* zone, Variable* var, int start_position,
+ int end_position)
+ : Expression(zone, start_position),
bit_field_(IsThisField::encode(var->is_this()) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
- raw_name_(var->raw_name()) {
+ raw_name_(var->raw_name()),
+ end_position_(end_position) {
BindTo(var);
}
-VariableProxy::VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
- int position)
- : Expression(zone, position),
- bit_field_(IsThisField::encode(is_this) | IsAssignedField::encode(false) |
+VariableProxy::VariableProxy(Zone* zone, const AstRawString* name,
+ Variable::Kind variable_kind, int start_position,
+ int end_position)
+ : Expression(zone, start_position),
+ bit_field_(IsThisField::encode(variable_kind == Variable::THIS) |
+ IsAssignedField::encode(false) |
IsResolvedField::encode(false)),
variable_feedback_slot_(FeedbackVectorICSlot::Invalid()),
- raw_name_(name) {}
+ raw_name_(name),
+ end_position_(end_position) {}
void VariableProxy::BindTo(Variable* var) {
@@ -87,6 +92,35 @@ void VariableProxy::BindTo(Variable* var) {
}
+void VariableProxy::SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) {
+ variable_feedback_slot_ = slot;
+ if (var()->IsUnallocated()) {
+ cache->Add(VariableICSlotPair(var(), slot));
+ }
+}
+
+
+FeedbackVectorRequirements VariableProxy::ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) {
+ if (UsesVariableFeedbackSlot()) {
+ // VariableProxies that point to the same Variable within a function can
+ // make their loads from the same IC slot.
+ if (var()->IsUnallocated()) {
+ for (int i = 0; i < cache->length(); i++) {
+ VariableICSlotPair& pair = cache->at(i);
+ if (pair.variable() == var()) {
+ variable_feedback_slot_ = pair.slot();
+ return FeedbackVectorRequirements(0, 0);
+ }
+ }
+ }
+ return FeedbackVectorRequirements(0, 1);
+ }
+ return FeedbackVectorRequirements(0, 0);
+}
+
+
Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
Expression* value, int pos)
: Expression(zone, pos),
@@ -562,7 +596,8 @@ bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
}
-FeedbackVectorRequirements Call::ComputeFeedbackRequirements(Isolate* isolate) {
+FeedbackVectorRequirements Call::ComputeFeedbackRequirements(
+ Isolate* isolate, const ICSlotCache* cache) {
int ic_slots = IsUsingCallFeedbackICSlot(isolate) ? 1 : 0;
int slots = IsUsingCallFeedbackSlot(isolate) ? 1 : 0;
// A Call uses either a slot or an IC slot.
@@ -590,48 +625,6 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
}
-bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
- LookupIterator* it) {
- target_ = Handle<JSFunction>::null();
- cell_ = Handle<Cell>::null();
- DCHECK(it->IsFound() && it->GetHolder<JSObject>().is_identical_to(global));
- cell_ = it->GetPropertyCell();
- if (cell_->value()->IsJSFunction()) {
- Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
- // If the function is in new space we assume it's more likely to
- // change and thus prefer the general IC code.
- if (!it->isolate()->heap()->InNewSpace(*candidate)) {
- target_ = candidate;
- return true;
- }
- }
- return false;
-}
-
-
-void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- FeedbackVectorSlot allocation_site_feedback_slot =
- FLAG_pretenuring_call_new ? AllocationSiteFeedbackSlot()
- : CallNewFeedbackSlot();
- allocation_site_ =
- oracle->GetCallNewAllocationSite(allocation_site_feedback_slot);
- is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
- if (is_monomorphic_) {
- target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
- }
-}
-
-
-void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
- DCHECK(!is_computed_name());
- TypeFeedbackId id = key()->AsLiteral()->LiteralFeedbackId();
- SmallMapList maps;
- oracle->CollectReceiverTypes(id, &maps);
- receiver_type_ = maps.length() == 1 ? maps.at(0)
- : Handle<Map>::null();
-}
-
-
// ----------------------------------------------------------------------------
// Implementation of AstVisitor
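
The ComputeFeedbackRequirements/SetFirstFeedbackICSlot pair above lets every VariableProxy for the same unallocated Variable share one load IC slot. The lookup half, isolated as a free function for illustration (the cache exposes length(), at(), variable() and slot(), per the List-based ICSlotCache introduced in ast.h below):

// Returns the slot already assigned to |var|, or Invalid() when this is the
// first proxy for it and a fresh IC slot must be requested.
FeedbackVectorICSlot LookupSharedSlot(const ICSlotCache* cache, Variable* var) {
  for (int i = 0; i < cache->length(); i++) {
    if (cache->at(i).variable() == var) return cache->at(i).slot();
  }
  return FeedbackVectorICSlot::Invalid();
}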
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 39bef91414..27e8f09a90 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -165,11 +165,30 @@ class FeedbackVectorRequirements {
};
+class VariableICSlotPair FINAL {
+ public:
+ VariableICSlotPair(Variable* variable, FeedbackVectorICSlot slot)
+ : variable_(variable), slot_(slot) {}
+ VariableICSlotPair()
+ : variable_(NULL), slot_(FeedbackVectorICSlot::Invalid()) {}
+
+ Variable* variable() const { return variable_; }
+ FeedbackVectorICSlot slot() const { return slot_; }
+
+ private:
+ Variable* variable_;
+ FeedbackVectorICSlot slot_;
+};
+
+
+typedef List<VariableICSlotPair> ICSlotCache;
+
+
class AstProperties FINAL BASE_EMBEDDED {
public:
class Flags : public EnumSet<AstPropertiesFlag, int> {};
- AstProperties() : node_count_(0) {}
+ explicit AstProperties(Zone* zone) : node_count_(0), spec_(zone) {}
Flags* flags() { return &flags_; }
int node_count() { return node_count_; }
@@ -181,12 +200,12 @@ class AstProperties FINAL BASE_EMBEDDED {
int ic_slots() const { return spec_.ic_slots(); }
void increase_ic_slots(int count) { spec_.increase_ic_slots(count); }
void SetKind(int ic_slot, Code::Kind kind) { spec_.SetKind(ic_slot, kind); }
- const FeedbackVectorSpec& get_spec() const { return spec_; }
+ const ZoneFeedbackVectorSpec* get_spec() const { return &spec_; }
private:
Flags flags_;
int node_count_;
- FeedbackVectorSpec spec_;
+ ZoneFeedbackVectorSpec spec_;
};
@@ -229,11 +248,12 @@ class AstNode: public ZoneObject {
// not really nice, but multiple inheritance would introduce yet another
// vtable entry per node, something we don't want for space reasons.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) {
+ Isolate* isolate, const ICSlotCache* cache) {
return FeedbackVectorRequirements(0, 0);
}
virtual void SetFirstFeedbackSlot(FeedbackVectorSlot slot) { UNREACHABLE(); }
- virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) {
+ virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) {
UNREACHABLE();
}
// Each ICSlot stores a kind of IC which the participating node should know.
@@ -609,23 +629,27 @@ class ImportDeclaration FINAL : public Declaration {
public:
DECLARE_NODE_TYPE(ImportDeclaration)
- Module* module() const { return module_; }
+ const AstRawString* import_name() const { return import_name_; }
+ const AstRawString* module_specifier() const { return module_specifier_; }
+ void set_module_specifier(const AstRawString* module_specifier) {
+ DCHECK(module_specifier_ == NULL);
+ module_specifier_ = module_specifier;
+ }
InitializationFlag initialization() const OVERRIDE {
- return kCreatedInitialized;
+ return kNeedsInitialization;
}
protected:
- ImportDeclaration(Zone* zone,
- VariableProxy* proxy,
- Module* module,
- Scope* scope,
- int pos)
- : Declaration(zone, proxy, LET, scope, pos),
- module_(module) {
- }
+ ImportDeclaration(Zone* zone, VariableProxy* proxy,
+ const AstRawString* import_name,
+ const AstRawString* module_specifier, Scope* scope, int pos)
+ : Declaration(zone, proxy, IMPORT, scope, pos),
+ import_name_(import_name),
+ module_specifier_(module_specifier) {}
private:
- Module* module_;
+ const AstRawString* import_name_;
+ const AstRawString* module_specifier_;
};
@@ -880,7 +904,7 @@ class ForInStatement FINAL : public ForEachStatement {
// Type feedback information.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) OVERRIDE {
+ Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(1, 0);
}
void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
@@ -942,12 +966,12 @@ class ForOfStatement FINAL : public ForEachStatement {
return subject();
}
- // var iterator = subject[Symbol.iterator]();
+ // iterator = subject[Symbol.iterator]()
Expression* assign_iterator() const {
return assign_iterator_;
}
- // var result = iterator.next();
+ // result = iterator.next() // with type check
Expression* next_result() const {
return next_result_;
}
@@ -1414,7 +1438,6 @@ class ObjectLiteralProperty FINAL : public ZoneObject {
Kind kind() { return kind_; }
// Type feedback information.
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsMonomorphic() { return !receiver_type_.is_null(); }
Handle<Map> GetReceiverType() { return receiver_type_; }
@@ -1426,6 +1449,8 @@ class ObjectLiteralProperty FINAL : public ZoneObject {
bool is_static() const { return is_static_; }
bool is_computed_name() const { return is_computed_name_; }
+ void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
+
protected:
friend class AstNodeFactory;
@@ -1614,9 +1639,7 @@ class VariableProxy FINAL : public Expression {
public:
DECLARE_NODE_TYPE(VariableProxy)
- bool IsValidReferenceExpression() const OVERRIDE {
- return !is_resolved() || var()->IsValidReference();
- }
+ bool IsValidReferenceExpression() const OVERRIDE { return !is_this(); }
bool IsArguments() const { return is_resolved() && var()->is_arguments(); }
@@ -1647,6 +1670,8 @@ class VariableProxy FINAL : public Expression {
bit_field_ = IsResolvedField::update(bit_field_, true);
}
+ int end_position() const { return end_position_; }
+
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1655,13 +1680,10 @@ class VariableProxy FINAL : public Expression {
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) OVERRIDE {
- return FeedbackVectorRequirements(0, UsesVariableFeedbackSlot() ? 1 : 0);
- }
+ Isolate* isolate, const ICSlotCache* cache) OVERRIDE;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
- variable_feedback_slot_ = slot;
- }
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) OVERRIDE;
Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
FeedbackVectorICSlot VariableFeedbackSlot() {
DCHECK(!UsesVariableFeedbackSlot() || !variable_feedback_slot_.IsInvalid());
@@ -1669,10 +1691,12 @@ class VariableProxy FINAL : public Expression {
}
protected:
- VariableProxy(Zone* zone, Variable* var, int position);
+ VariableProxy(Zone* zone, Variable* var, int start_position,
+ int end_position);
- VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
- int position);
+ VariableProxy(Zone* zone, const AstRawString* name,
+ Variable::Kind variable_kind, int start_position,
+ int end_position);
class IsThisField : public BitField8<bool, 0, 1> {};
class IsAssignedField : public BitField8<bool, 1, 1> {};
@@ -1686,6 +1710,10 @@ class VariableProxy FINAL : public Expression {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
};
+ // Position is stored in the AstNode superclass, but VariableProxy needs to
+ // know its end position too (for error messages). It cannot be inferred from
+ // the variable name length because it can contain escapes.
+ int end_position_;
};
@@ -1738,10 +1766,11 @@ class Property FINAL : public Expression {
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) OVERRIDE {
+ Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
}
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) OVERRIDE {
property_feedback_slot_ = slot;
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE {
@@ -1788,8 +1817,9 @@ class Call FINAL : public Expression {
// Type feedback information.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) OVERRIDE;
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+ Isolate* isolate, const ICSlotCache* cache) OVERRIDE;
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) OVERRIDE {
ic_slot_or_slot_ = slot.ToInt();
}
void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
@@ -1832,15 +1862,16 @@ class Call FINAL : public Expression {
Handle<JSFunction> target() { return target_; }
- Handle<Cell> cell() { return cell_; }
-
Handle<AllocationSite> allocation_site() { return allocation_site_; }
+ void SetKnownGlobalTarget(Handle<JSFunction> target) {
+ target_ = target;
+ set_is_uninitialized(false);
+ }
void set_target(Handle<JSFunction> target) { target_ = target; }
void set_allocation_site(Handle<AllocationSite> site) {
allocation_site_ = site;
}
- bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupIterator* it);
static int num_ids() { return parent_num_ids() + 2; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
@@ -1895,7 +1926,6 @@ class Call FINAL : public Expression {
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
- Handle<Cell> cell_;
Handle<AllocationSite> allocation_site_;
class IsUninitializedField : public BitField8<bool, 0, 1> {};
uint8_t bit_field_;
@@ -1911,7 +1941,7 @@ class CallNew FINAL : public Expression {
// Type feedback information.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) OVERRIDE {
+ Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(FLAG_pretenuring_call_new ? 2 : 1, 0);
}
void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
@@ -1927,7 +1957,6 @@ class CallNew FINAL : public Expression {
return CallNewFeedbackSlot().next();
}
- void RecordTypeFeedback(TypeFeedbackOracle* oracle);
bool IsMonomorphic() OVERRIDE { return is_monomorphic_; }
Handle<JSFunction> target() const { return target_; }
Handle<AllocationSite> allocation_site() const {
@@ -1938,6 +1967,16 @@ class CallNew FINAL : public Expression {
static int feedback_slots() { return 1; }
BailoutId ReturnId() const { return BailoutId(local_id(0)); }
+ void set_allocation_site(Handle<AllocationSite> site) {
+ allocation_site_ = site;
+ }
+ void set_is_monomorphic(bool monomorphic) { is_monomorphic_ = monomorphic; }
+ void set_target(Handle<JSFunction> target) { target_ = target; }
+ void SetKnownGlobalTarget(Handle<JSFunction> target) {
+ target_ = target;
+ is_monomorphic_ = true;
+ }
+
protected:
CallNew(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
int pos)
@@ -1980,10 +2019,11 @@ class CallRuntime FINAL : public Expression {
return FLAG_vector_ics && is_jsruntime();
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) OVERRIDE {
+ Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(0, HasCallRuntimeFeedbackSlot() ? 1 : 0);
}
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) OVERRIDE {
callruntime_feedback_slot_ = slot;
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
@@ -2072,12 +2112,11 @@ class BinaryOperation FINAL : public Expression {
return TypeFeedbackId(local_id(1));
}
Maybe<int> fixed_right_arg() const {
- return has_fixed_right_arg_ ? Maybe<int>(fixed_right_arg_value_)
- : Maybe<int>();
+ return has_fixed_right_arg_ ? Just(fixed_right_arg_value_) : Nothing<int>();
}
void set_fixed_right_arg(Maybe<int> arg) {
- has_fixed_right_arg_ = arg.has_value;
- if (arg.has_value) fixed_right_arg_value_ = arg.value;
+ has_fixed_right_arg_ = arg.IsJust();
+ if (arg.IsJust()) fixed_right_arg_value_ = arg.FromJust();
}
virtual void RecordToBooleanTypeFeedback(
@@ -2354,10 +2393,11 @@ class Yield FINAL : public Expression {
return FLAG_vector_ics && (yield_kind() == kDelegating);
}
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) OVERRIDE {
+ Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(0, HasFeedbackSlots() ? 3 : 0);
}
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) OVERRIDE {
yield_first_feedback_slot_ = slot;
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE {
@@ -2534,7 +2574,7 @@ class FunctionLiteral FINAL : public Expression {
void set_ast_properties(AstProperties* ast_properties) {
ast_properties_ = *ast_properties;
}
- const FeedbackVectorSpec& feedback_vector_spec() const {
+ const ZoneFeedbackVectorSpec* feedback_vector_spec() const {
return ast_properties_.get_spec();
}
bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
@@ -2558,6 +2598,7 @@ class FunctionLiteral FINAL : public Expression {
scope_(scope),
body_(body),
raw_inferred_name_(ast_value_factory->empty_string()),
+ ast_properties_(zone),
dont_optimize_reason_(kNoReason),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
@@ -2598,7 +2639,7 @@ class FunctionLiteral FINAL : public Expression {
class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
class IsParenthesized : public BitField<IsParenthesizedFlag, 5, 1> {};
- class FunctionKindBits : public BitField<FunctionKind, 6, 7> {};
+ class FunctionKindBits : public BitField<FunctionKind, 6, 8> {};
};
@@ -2695,10 +2736,11 @@ class SuperReference FINAL : public Expression {
// Type feedback information.
virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
- Isolate* isolate) OVERRIDE {
+ Isolate* isolate, const ICSlotCache* cache) OVERRIDE {
return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
}
- void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+ void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot,
+ ICSlotCache* cache) OVERRIDE {
homeobject_feedback_slot_ = slot;
}
Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
@@ -3171,10 +3213,11 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
}
ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
- Module* module,
- Scope* scope,
- int pos) {
- return new (zone_) ImportDeclaration(zone_, proxy, module, scope, pos);
+ const AstRawString* import_name,
+ const AstRawString* module_specifier,
+ Scope* scope, int pos) {
+ return new (zone_) ImportDeclaration(zone_, proxy, import_name,
+ module_specifier, scope, pos);
}
ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
@@ -3369,14 +3412,17 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
}
VariableProxy* NewVariableProxy(Variable* var,
- int pos = RelocInfo::kNoPosition) {
- return new (zone_) VariableProxy(zone_, var, pos);
+ int start_position = RelocInfo::kNoPosition,
+ int end_position = RelocInfo::kNoPosition) {
+ return new (zone_) VariableProxy(zone_, var, start_position, end_position);
}
VariableProxy* NewVariableProxy(const AstRawString* name,
- bool is_this,
- int position = RelocInfo::kNoPosition) {
- return new (zone_) VariableProxy(zone_, name, is_this, position);
+ Variable::Kind variable_kind,
+ int start_position = RelocInfo::kNoPosition,
+ int end_position = RelocInfo::kNoPosition) {
+ return new (zone_)
+ VariableProxy(zone_, name, variable_kind, start_position, end_position);
}
Property* NewProperty(Expression* obj, Expression* key, int pos) {
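
The factory now threads a Variable::Kind and a start/end position pair through NewVariableProxy in place of the old bool is_this. A call-site sketch with illustrative values; Variable::NORMAL is assumed to be the ordinary kind, by analogy with the Variable::THIS check used in ast.cc above:

// Proxy for an identifier spanning source offsets [10, 13):
VariableProxy* proxy = factory->NewVariableProxy(
    name, Variable::NORMAL, /* start_position */ 10, /* end_position */ 13);
// end_position() feeds error messages; per the new comment in VariableProxy,
// it cannot be recomputed from the name length because of escapes.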
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index cb31cc9982..dcc5eb4e6c 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -10,27 +10,35 @@ namespace internal {
BackgroundParsingTask::BackgroundParsingTask(
StreamedSource* source, ScriptCompiler::CompileOptions options,
int stack_size, Isolate* isolate)
- : source_(source), options_(options), stack_size_(stack_size) {
- // Prepare the data for the internalization phase and compilation phase, which
- // will happen in the main thread after parsing.
- source->info.Reset(new i::CompilationInfoWithZone(source->source_stream.get(),
- source->encoding, isolate));
- source->info->MarkAsGlobal();
-
+ : source_(source), stack_size_(stack_size) {
// We don't set the context to the CompilationInfo yet, because the background
// thread cannot do anything with it anyway. We set it just before compilation
// on the foreground thread.
DCHECK(options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kProduceCodeCache ||
options == ScriptCompiler::kNoCompileOptions);
- source->allow_lazy =
- !i::Compiler::DebuggerWantsEagerCompilation(source->info.get());
- if (!source->allow_lazy && options_ == ScriptCompiler::kProduceParserCache) {
+ // Prepare the data for the internalization phase and compilation phase, which
+ // will happen in the main thread after parsing.
+ Zone* zone = new Zone();
+ ParseInfo* info = new ParseInfo(zone);
+ source->zone.Reset(zone);
+ source->info.Reset(info);
+ info->set_isolate(isolate);
+ info->set_source_stream(source->source_stream.get());
+ info->set_source_stream_encoding(source->encoding);
+ info->set_hash_seed(isolate->heap()->HashSeed());
+ info->set_global();
+ info->set_unicode_cache(&source_->unicode_cache);
+
+ bool disable_lazy = Compiler::DebuggerWantsEagerCompilation(isolate);
+ if (disable_lazy && options == ScriptCompiler::kProduceParserCache) {
// Producing cached data while parsing eagerly is not supported.
- options_ = ScriptCompiler::kNoCompileOptions;
+ options = ScriptCompiler::kNoCompileOptions;
}
- source->hash_seed = isolate->heap()->HashSeed();
+
+ info->set_compile_options(options);
+ info->set_allow_lazy_parsing(!disable_lazy);
}
@@ -40,20 +48,19 @@ void BackgroundParsingTask::Run() {
DisallowHandleDereference no_deref;
ScriptData* script_data = NULL;
- if (options_ == ScriptCompiler::kProduceParserCache ||
- options_ == ScriptCompiler::kProduceCodeCache) {
- source_->info->SetCachedData(&script_data, options_);
+ ScriptCompiler::CompileOptions options = source_->info->compile_options();
+ if (options == ScriptCompiler::kProduceParserCache ||
+ options == ScriptCompiler::kProduceCodeCache) {
+ source_->info->set_cached_data(&script_data);
}
uintptr_t stack_limit =
reinterpret_cast<uintptr_t>(&stack_limit) - stack_size_ * KB;
+ source_->info->set_stack_limit(stack_limit);
// Parser needs to stay alive for finalizing the parsing on the main
// thread. Passing &parse_info is OK because Parser doesn't store it.
- source_->parser.Reset(new Parser(source_->info.get(), stack_limit,
- source_->hash_seed,
- &source_->unicode_cache));
- source_->parser->set_allow_lazy(source_->allow_lazy);
+ source_->parser.Reset(new Parser(source_->info.get()));
source_->parser->ParseOnBackground(source_->info.get());
if (script_data != NULL) {
diff --git a/deps/v8/src/background-parsing-task.h b/deps/v8/src/background-parsing-task.h
index 19c93a833a..80e1e271d2 100644
--- a/deps/v8/src/background-parsing-task.h
+++ b/deps/v8/src/background-parsing-task.h
@@ -14,18 +14,13 @@
namespace v8 {
namespace internal {
-class Parser;
-
// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
// data which needs to be transmitted between threads for background parsing,
// finalizing it on the main thread, and compiling on the main thread.
struct StreamedSource {
StreamedSource(ScriptCompiler::ExternalSourceStream* source_stream,
ScriptCompiler::StreamedSource::Encoding encoding)
- : source_stream(source_stream),
- encoding(encoding),
- hash_seed(0),
- allow_lazy(false) {}
+ : source_stream(source_stream), encoding(encoding) {}
// Internal implementation of v8::ScriptCompiler::StreamedSource.
SmartPointer<ScriptCompiler::ExternalSourceStream> source_stream;
@@ -36,9 +31,8 @@ struct StreamedSource {
// between parsing and compilation. These need to be initialized before the
// compilation starts.
UnicodeCache unicode_cache;
- SmartPointer<CompilationInfo> info;
- uint32_t hash_seed;
- bool allow_lazy;
+ SmartPointer<Zone> zone;
+ SmartPointer<ParseInfo> info;
SmartPointer<Parser> parser;
private:
@@ -58,7 +52,6 @@ class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
private:
StreamedSource* source_; // Not owned.
- ScriptCompiler::CompileOptions options_;
int stack_size_;
};
}
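
The hunks above move all per-task parsing configuration off StreamedSource (hash_seed, allow_lazy, the extra Parser constructor arguments) and onto a ParseInfo that the source owns next to its Zone, so the background thread receives one self-describing object. A minimal sketch of that ownership shape, using toy stand-ins for the v8::internal types:

    #include <cstdint>
    #include <memory>

    struct Zone {};  // toy arena owning parse-time allocations

    struct ParseInfo {
      explicit ParseInfo(Zone* zone) : zone_(zone) {}
      void set_hash_seed(uint32_t seed) { hash_seed_ = seed; }
      void set_allow_lazy_parsing(bool allow) { allow_lazy_ = allow; }
      bool allow_lazy_parsing() const { return allow_lazy_; }

     private:
      Zone* zone_;              // borrowed; owned by the source below
      uint32_t hash_seed_ = 0;
      bool allow_lazy_ = true;
    };

    // Mirrors the patched StreamedSource: the loose fields hash_seed and
    // allow_lazy are gone, replaced by an owned zone plus ParseInfo.
    struct StreamedSourceSketch {
      std::unique_ptr<Zone> zone;
      std::unique_ptr<ParseInfo> info;
    };

    int main() {
      StreamedSourceSketch source;
      source.zone.reset(new Zone());
      source.info.reset(new ParseInfo(source.zone.get()));
      source.info->set_hash_seed(0xc0ffee);       // was source->hash_seed
      source.info->set_allow_lazy_parsing(true);  // was source->allow_lazy
      return source.info->allow_lazy_parsing() ? 0 : 1;
    }
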
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 8bfe7a9383..9b801c841d 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -116,19 +116,10 @@ namespace internal {
"Improper object on prototype chain for store") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
- V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf") \
V(kInlinedRuntimeFunctionFastOneByteArrayJoin, \
"Inlined runtime function: FastOneByteArrayJoin") \
- V(kInlinedRuntimeFunctionGeneratorNext, \
- "Inlined runtime function: GeneratorNext") \
- V(kInlinedRuntimeFunctionGeneratorThrow, \
- "Inlined runtime function: GeneratorThrow") \
V(kInlinedRuntimeFunctionGetFromCache, \
"Inlined runtime function: GetFromCache") \
- V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
- "Inlined runtime function: IsNonNegativeSmi") \
- V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
- "Inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
@@ -310,7 +301,6 @@ namespace internal {
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
V(kUnimplemented, "unimplemented") \
- V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
V(kUnsupportedConstCompoundAssignment, \
"Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
@@ -345,7 +335,8 @@ enum BailoutReason {
const char* GetBailoutReason(BailoutReason reason);
-}
-} // namespace v8::internal
+
+} // namespace internal
+} // namespace v8
#endif // V8_BAILOUT_REASON_H_
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index 0f4d4c712b..5c7cd74c61 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -148,17 +148,30 @@ inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
}
+// Precondition: 0 <= shift < 32
inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
if (shift == 0) return value;
return (value >> shift) | (value << (32 - shift));
}
+// Precondition: 0 <= shift < 32
+inline uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
+ if (shift == 0) return value;
+ return (value << shift) | (value >> (32 - shift));
+}
+// Precondition: 0 <= shift < 64
inline uint64_t RotateRight64(uint64_t value, uint64_t shift) {
if (shift == 0) return value;
return (value >> shift) | (value << (64 - shift));
}
+// Precondition: 0 <= shift < 64
+inline uint64_t RotateLeft64(uint64_t value, uint64_t shift) {
+ if (shift == 0) return value;
+ return (value << shift) | (value >> (64 - shift));
+}
+
// SignedAddOverflow32(lhs,rhs,val) performs a signed summation of |lhs| and
// |rhs| and stores the result into the variable pointed to by |val| and
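
The shift == 0 early return in these rotate helpers is load-bearing: without it, the complementary shift would be by a full 32 (or 64) bits, which C++ leaves undefined. A standalone copy of the 32-bit pair with a quick round-trip check:

    #include <cassert>
    #include <cstdint>

    // Same shape as v8::base::bits::RotateRight32/RotateLeft32.
    // Precondition: 0 <= shift < 32; the shift == 0 guard avoids the
    // undefined (32 - 0)-bit shift in the second half of the expression.
    inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
      if (shift == 0) return value;
      return (value >> shift) | (value << (32 - shift));
    }

    inline uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
      if (shift == 0) return value;
      return (value << shift) | (value >> (32 - shift));
    }

    int main() {
      // Rotating left then right by the same amount is the identity.
      uint32_t v = 0x12345678u;
      for (uint32_t s = 0; s < 32; ++s) {
        assert(RotateRight32(RotateLeft32(v, s), s) == v);
      }
      assert(RotateLeft32(0x80000000u, 1) == 1u);  // top bit wraps to bit 0
      return 0;
    }
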
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 84cd231f61..872b20dcba 100644
--- a/deps/v8/src/base/cpu.cc
+++ b/deps/v8/src/base/cpu.cc
@@ -385,6 +385,8 @@ CPU::CPU()
case 0x37: // SLM
case 0x4a:
case 0x4d:
+ case 0x4c: // AMT
+ case 0x6e:
is_atom_ = true;
}
}
diff --git a/deps/v8/src/base/logging.cc b/deps/v8/src/base/logging.cc
index 25d77bb1ec..a2688c9c9a 100644
--- a/deps/v8/src/base/logging.cc
+++ b/deps/v8/src/base/logging.cc
@@ -5,10 +5,11 @@
#include "src/base/logging.h"
#if V8_LIBC_GLIBC || V8_OS_BSD
-# include <cxxabi.h>
-# include <execinfo.h>
+#include <cxxabi.h>
+#include <dlfcn.h>
+#include <execinfo.h>
#elif V8_OS_QNX
-# include <backtrace.h>
+#include <backtrace.h>
#endif // V8_LIBC_GLIBC || V8_OS_BSD
#include <cstdio>
@@ -54,28 +55,24 @@ void DumpBacktrace() {
#if V8_LIBC_GLIBC || V8_OS_BSD
void* trace[100];
int size = backtrace(trace, arraysize(trace));
- char** symbols = backtrace_symbols(trace, size);
OS::PrintError("\n==== C stack trace ===============================\n\n");
if (size == 0) {
OS::PrintError("(empty)\n");
- } else if (symbols == NULL) {
- OS::PrintError("(no symbols)\n");
} else {
for (int i = 1; i < size; ++i) {
OS::PrintError("%2d: ", i);
- char mangled[201];
- if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT
- int status;
- size_t length;
- char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
- OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+ Dl_info info;
+ char* demangled = NULL;
+ if (!dladdr(trace[i], &info) || !info.dli_sname) {
+ OS::PrintError("%p\n", trace[i]);
+ } else if ((demangled = abi::__cxa_demangle(info.dli_sname, 0, 0, 0))) {
+ OS::PrintError("%s\n", demangled);
free(demangled);
} else {
- OS::PrintError("??\n");
+ OS::PrintError("%s\n", info.dli_sname);
}
}
}
- free(symbols);
#elif V8_OS_QNX
char out[1024];
bt_accessor_t acc;
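
The rewritten DumpBacktrace resolves each frame with dladdr() and demangles via abi::__cxa_demangle() instead of string-parsing backtrace_symbols() output, and it degrades gracefully: raw address when no symbol is found, mangled name when demangling fails. A self-contained sketch of the same resolution chain (glibc/BSD; typically link with -ldl and -rdynamic so dladdr can name symbols in the executable):

    #include <cxxabi.h>
    #include <dlfcn.h>
    #include <execinfo.h>
    #include <cstdio>
    #include <cstdlib>

    void PrintBacktrace() {
      void* trace[100];
      int size = backtrace(trace, 100);
      for (int i = 1; i < size; ++i) {  // skip frame 0 (this function)
        Dl_info info;
        char* demangled = NULL;
        if (!dladdr(trace[i], &info) || !info.dli_sname) {
          std::fprintf(stderr, "%2d: %p\n", i, trace[i]);        // no symbol
        } else if ((demangled =
                        abi::__cxa_demangle(info.dli_sname, 0, 0, 0))) {
          std::fprintf(stderr, "%2d: %s\n", i, demangled);       // C++ name
          std::free(demangled);
        } else {
          std::fprintf(stderr, "%2d: %s\n", i, info.dli_sname);  // C name
        }
      }
    }

    int main() { PrintBacktrace(); }
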
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 58316f8bc1..68ed70af93 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -188,7 +188,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
void* reservation = mmap(OS::GetRandomMmapAddr(),
request_size,
PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ MAP_PRIVATE | MAP_ANON,
kMmapFd,
kMmapFdOffset);
if (reservation == MAP_FAILED) return;
@@ -260,7 +260,7 @@ void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+ MAP_PRIVATE | MAP_ANON,
kMmapFd,
kMmapFdOffset);
@@ -288,7 +288,7 @@ bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return mmap(base,
size,
PROT_NONE,
- MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED,
kMmapFd,
kMmapFdOffset) != MAP_FAILED;
}
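
These hunks only drop MAP_NORESERVE on FreeBSD; the underlying reserve-then-commit scheme is untouched: a PROT_NONE anonymous mapping claims address space without usable pages, and committing later flips protections on just the range needed. A hedged POSIX sketch of that two-step pattern (MAP_ANONYMOUS spelled the Linux way; BSDs also accept MAP_ANON):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <cstring>

    int main() {
      const size_t size = 1 << 20;  // 1 MB of address space
      // Step 1: reserve. PROT_NONE means any touch faults, so this only
      // claims the range (mirrors VirtualMemory::ReserveRegion above).
      void* base = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
      assert(base != MAP_FAILED);

      // Step 2: commit a sub-range by making it readable and writable.
      const size_t committed = 4096;
      int rc = mprotect(base, committed, PROT_READ | PROT_WRITE);
      assert(rc == 0);
      std::memset(base, 0xab, committed);  // now safe to touch

      munmap(base, size);
      return 0;
    }
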
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index dc35d3d812..e43493f0ae 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -358,6 +358,11 @@ bool OS::Remove(const char* path) {
}
+bool OS::isDirectorySeparator(const char ch) {
+ return ch == '/';
+}
+
+
FILE* OS::OpenTemporaryFile() {
return tmpfile();
}
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 5a6f2f1cbf..07c0bc3ebf 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -575,6 +575,11 @@ bool OS::Remove(const char* path) {
}
+bool OS::isDirectorySeparator(const char ch) {
+ return ch == '/' || ch == '\\';
+}
+
+
FILE* OS::OpenTemporaryFile() {
// tmpfile_s tries to use the root dir, don't use it.
char tempPathBuffer[MAX_PATH];
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 07155f7a58..1873bbe57d 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -142,6 +142,8 @@ class OS {
static FILE* FOpen(const char* path, const char* mode);
static bool Remove(const char* path);
+ static bool isDirectorySeparator(const char ch);
+
// Opens a temporary file, the file is auto removed on close.
static FILE* OpenTemporaryFile();
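
The new OS::isDirectorySeparator hook centralizes the platform question answered file-by-file above ('/' on POSIX, '/' or '\\' on Win32). A small sketch of the kind of call site it enables; Basename here is a hypothetical helper, not part of the patch:

    #include <cstdio>

    // Stand-in for the per-platform definitions added above.
    static bool IsDirectorySeparator(char ch) {
    #ifdef _WIN32
      return ch == '/' || ch == '\\';
    #else
      return ch == '/';
    #endif
    }

    // Hypothetical helper: return the component after the last separator.
    static const char* Basename(const char* path) {
      const char* base = path;
      for (const char* p = path; *p != '\0'; ++p) {
        if (IsDirectorySeparator(*p)) base = p + 1;
      }
      return base;
    }

    int main() {
      std::printf("%s\n", Basename("deps/v8/src/base/platform/platform.h"));
      return 0;
    }
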
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 2592bb7330..f0946251d5 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -13,8 +13,8 @@
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
#include "src/isolate-inl.h"
-#include "src/natives.h"
-#include "src/snapshot.h"
+#include "src/snapshot/natives.h"
+#include "src/snapshot/snapshot.h"
#include "third_party/fdlibm/fdlibm.h"
namespace v8 {
@@ -140,6 +140,7 @@ class Genesis BASE_EMBEDDED {
Handle<JSFunction> GetGeneratorPoisonFunction();
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
+ void CreateStrongModeFunctionMaps(Handle<JSFunction> empty);
// Make the "arguments" and "caller" properties throw a TypeError on access.
void PoisonArgumentsAndCaller(Handle<Map> map);
@@ -256,18 +257,21 @@ class Genesis BASE_EMBEDDED {
function_mode == FUNCTION_WITH_READONLY_PROTOTYPE);
}
- Handle<Map> CreateFunctionMap(FunctionMode function_mode);
+ Handle<Map> CreateSloppyFunctionMap(FunctionMode function_mode);
void SetFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
void MakeFunctionInstancePrototypeWritable();
- Handle<Map> CreateStrictFunctionMap(
- FunctionMode function_mode,
- Handle<JSFunction> empty_function);
+ Handle<Map> CreateStrictFunctionMap(FunctionMode function_mode,
+ Handle<JSFunction> empty_function);
+ Handle<Map> CreateStrongFunctionMap(Handle<JSFunction> empty_function,
+ bool is_constructor);
+
void SetStrictFunctionInstanceDescriptor(Handle<Map> map,
FunctionMode function_mode);
+ void SetStrongFunctionInstanceDescriptor(Handle<Map> map);
static bool CompileBuiltin(Isolate* isolate, int index);
static bool CompileExperimentalBuiltin(Isolate* isolate, int index);
@@ -335,7 +339,7 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
global_proxy->set_native_context(*factory->null_value());
SetObjectPrototype(global_proxy, factory->null_value());
- global_proxy->map()->set_constructor(*factory->null_value());
+ global_proxy->map()->SetConstructor(*factory->null_value());
if (FLAG_track_detached_contexts) {
env->GetIsolate()->AddDetachedContext(env);
}
@@ -378,51 +382,53 @@ void Genesis::SetFunctionInstanceDescriptor(
int size = IsFunctionModeWithPrototype(function_mode) ? 5 : 4;
Map::EnsureDescriptorSlack(map, size);
- PropertyAttributes attribs = static_cast<PropertyAttributes>(
- DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), attribs);
+ Accessors::FunctionLengthInfo(isolate(), roc_attribs);
{ // Add length.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, attribs);
+ length, roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), attribs);
+ Accessors::FunctionNameInfo(isolate(), ro_attribs);
{ // Add name.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- attribs);
+ roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> args =
- Accessors::FunctionArgumentsInfo(isolate(), attribs);
+ Accessors::FunctionArgumentsInfo(isolate(), ro_attribs);
{ // Add arguments.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(args->name())), args,
- attribs);
+ ro_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> caller =
- Accessors::FunctionCallerInfo(isolate(), attribs);
+ Accessors::FunctionCallerInfo(isolate(), ro_attribs);
{ // Add caller.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(caller->name())),
- caller, attribs);
+ caller, ro_attribs);
map->AppendDescriptor(&d);
}
if (IsFunctionModeWithPrototype(function_mode)) {
if (function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE) {
- attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY);
+ ro_attribs = static_cast<PropertyAttributes>(ro_attribs & ~READ_ONLY);
}
Handle<AccessorInfo> prototype =
- Accessors::FunctionPrototypeInfo(isolate(), attribs);
+ Accessors::FunctionPrototypeInfo(isolate(), ro_attribs);
AccessorConstantDescriptor d(Handle<Name>(Name::cast(prototype->name())),
- prototype, attribs);
+ prototype, ro_attribs);
map->AppendDescriptor(&d);
}
}
-Handle<Map> Genesis::CreateFunctionMap(FunctionMode function_mode) {
+Handle<Map> Genesis::CreateSloppyFunctionMap(FunctionMode function_mode) {
Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
SetFunctionInstanceDescriptor(map, function_mode);
map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode));
@@ -437,7 +443,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Functions with this map will not have a 'prototype' property, and
// can not be used as constructors.
Handle<Map> function_without_prototype_map =
- CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
+ CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
native_context()->set_sloppy_function_without_prototype_map(
*function_without_prototype_map);
@@ -445,7 +451,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// of builtins.
// Later the map is replaced with writable prototype map, allocated below.
Handle<Map> function_map =
- CreateFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE);
+ CreateSloppyFunctionMap(FUNCTION_WITH_READONLY_PROTOTYPE);
native_context()->set_sloppy_function_map(*function_map);
native_context()->set_sloppy_function_with_readonly_prototype_map(
*function_map);
@@ -453,7 +459,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// The final map for functions. Writeable prototype.
// This map is installed in MakeFunctionInstancePrototypeWritable.
sloppy_function_map_writable_prototype_ =
- CreateFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
+ CreateSloppyFunctionMap(FUNCTION_WITH_WRITEABLE_PROTOTYPE);
Factory* factory = isolate->factory();
@@ -501,7 +507,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_function_map =
- CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
+ CreateSloppyFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
DCHECK(!empty_function_map->is_dictionary_map());
empty_function_map->SetPrototype(object_function_prototype);
empty_function_map->set_is_prototype_map(true);
@@ -536,6 +542,8 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
PropertyAttributes ro_attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+ PropertyAttributes roc_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY);
// Add length.
if (function_mode == BOUND_FUNCTION) {
@@ -547,16 +555,16 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
function_mode == FUNCTION_WITHOUT_PROTOTYPE);
Handle<AccessorInfo> length =
- Accessors::FunctionLengthInfo(isolate(), ro_attribs);
+ Accessors::FunctionLengthInfo(isolate(), roc_attribs);
AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
- length, ro_attribs);
+ length, roc_attribs);
map->AppendDescriptor(&d);
}
Handle<AccessorInfo> name =
- Accessors::FunctionNameInfo(isolate(), ro_attribs);
+ Accessors::FunctionNameInfo(isolate(), roc_attribs);
{ // Add name.
AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
- ro_attribs);
+ roc_attribs);
map->AppendDescriptor(&d);
}
{ // Add arguments.
@@ -583,6 +591,29 @@ void Genesis::SetStrictFunctionInstanceDescriptor(
}
+void Genesis::SetStrongFunctionInstanceDescriptor(Handle<Map> map) {
+ Map::EnsureDescriptorSlack(map, 2);
+
+ PropertyAttributes ro_attribs =
+ static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+
+ Handle<AccessorInfo> length =
+ Accessors::FunctionLengthInfo(isolate(), ro_attribs);
+ { // Add length.
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(length->name())),
+ length, ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+ Handle<AccessorInfo> name =
+ Accessors::FunctionNameInfo(isolate(), ro_attribs);
+ { // Add name.
+ AccessorConstantDescriptor d(Handle<Name>(Name::cast(name->name())), name,
+ ro_attribs);
+ map->AppendDescriptor(&d);
+ }
+}
+
+
// ECMAScript 5th Edition, 13.2.3
Handle<JSFunction> Genesis::GetStrictPoisonFunction() {
if (strict_poison_function.is_null()) {
@@ -628,6 +659,18 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
}
+Handle<Map> Genesis::CreateStrongFunctionMap(
+ Handle<JSFunction> empty_function, bool is_constructor) {
+ Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
+ SetStrongFunctionInstanceDescriptor(map);
+ map->set_function_with_prototype(is_constructor);
+ map->SetPrototype(empty_function);
+ map->set_is_extensible(is_constructor);
+ // TODO(rossberg): mark strong
+ return map;
+}
+
+
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Allocate map for the prototype-less strict mode instances.
Handle<Map> strict_function_without_prototype_map =
@@ -659,6 +702,16 @@ void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
}
+void Genesis::CreateStrongModeFunctionMaps(Handle<JSFunction> empty) {
+ // Allocate map for strong mode instances, which never have prototypes.
+ Handle<Map> strong_function_map = CreateStrongFunctionMap(empty, false);
+ native_context()->set_strong_function_map(*strong_function_map);
+ // Constructors do, though.
+ Handle<Map> strong_constructor_map = CreateStrongFunctionMap(empty, true);
+ native_context()->set_strong_constructor_map(*strong_constructor_map);
+}
+
+
static void SetAccessors(Handle<Map> map,
Handle<String> name,
Handle<JSFunction> func) {
@@ -1264,8 +1317,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
map->set_inobject_properties(1);
// Copy constructor from the sloppy arguments boilerplate.
- map->set_constructor(
- native_context()->sloppy_arguments_map()->constructor());
+ map->SetConstructor(
+ native_context()->sloppy_arguments_map()->GetConstructor());
native_context()->set_strict_arguments_map(*map);
@@ -1412,8 +1465,8 @@ bool Genesis::CompileScriptCached(Isolate* isolate,
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
function_info = Compiler::CompileScript(
- source, script_name, 0, 0, false, false, top_context, extension, NULL,
- ScriptCompiler::kNoCompileOptions,
+ source, script_name, 0, 0, false, false, Handle<Object>(), top_context,
+ extension, NULL, ScriptCompiler::kNoCompileOptions,
use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE, false);
if (function_info.is_null()) return false;
if (cache != NULL) cache->Add(name, function_info);
@@ -1485,7 +1538,7 @@ static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
void Genesis::InstallNativeFunctions() {
HandleScope scope(isolate());
- INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
+ INSTALL_NATIVE(JSFunction, "$createDate", create_date_fun);
INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
INSTALL_NATIVE(JSFunction, "ToString", to_string_fun);
@@ -1501,7 +1554,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToCompletePropertyDescriptor",
to_complete_property_descriptor);
- INSTALL_NATIVE(JSFunction, "IsPromise", is_promise);
+ INSTALL_NATIVE(Symbol, "promiseStatus", promise_status);
INSTALL_NATIVE(JSFunction, "PromiseCreate", promise_create);
INSTALL_NATIVE(JSFunction, "PromiseResolve", promise_resolve);
INSTALL_NATIVE(JSFunction, "PromiseReject", promise_reject);
@@ -1598,9 +1651,7 @@ void Genesis::InitializeBuiltinTypedArrays() {
#define EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(id) \
void Genesis::InstallNativeFunctions_##id() {}
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_scoping)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_modules)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_strings)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrays)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_array_includes)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_classes)
@@ -1609,12 +1660,12 @@ EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_regexps)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrow_functions)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_numeric_literals)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
-EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_templates)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode_regexps)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_computed_property_names)
EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_rest_parameters)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_reflect)
void Genesis::InstallNativeFunctions_harmony_proxies() {
@@ -1631,9 +1682,7 @@ void Genesis::InstallNativeFunctions_harmony_proxies() {
#define EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(id) \
void Genesis::InitializeGlobal_##id() {}
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_scoping)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_strings)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrays)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_classes)
@@ -1642,7 +1691,6 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_literals)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
-EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_templates)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_computed_property_names)
@@ -1653,10 +1701,9 @@ void Genesis::InitializeGlobal_harmony_regexps() {
Handle<HeapObject> flag(FLAG_harmony_regexps ? heap()->true_value()
: heap()->false_value());
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
- Runtime::DefineObjectProperty(builtins, factory()->harmony_regexps_string(),
- flag, attributes).Assert();
+ Runtime::SetObjectProperty(isolate(), builtins,
+ factory()->harmony_regexps_string(), flag,
+ STRICT).Assert();
}
@@ -1665,11 +1712,51 @@ void Genesis::InitializeGlobal_harmony_unicode_regexps() {
Handle<HeapObject> flag(FLAG_harmony_unicode_regexps ? heap()->true_value()
: heap()->false_value());
- PropertyAttributes attributes =
- static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
- Runtime::DefineObjectProperty(builtins,
- factory()->harmony_unicode_regexps_string(),
- flag, attributes).Assert();
+ Runtime::SetObjectProperty(isolate(), builtins,
+ factory()->harmony_unicode_regexps_string(), flag,
+ STRICT).Assert();
+}
+
+
+void Genesis::InitializeGlobal_harmony_reflect() {
+ if (!FLAG_harmony_reflect) return;
+ Handle<JSObject> builtins(native_context()->builtins());
+ // Install references to functions of the Reflect object
+ {
+ Handle<JSFunction> apply =
+ InstallFunction(builtins, "ReflectApply", JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ Builtins::kReflectApply);
+ Handle<JSFunction> construct =
+ InstallFunction(builtins, "ReflectConstruct", JS_OBJECT_TYPE,
+ JSObject::kHeaderSize, MaybeHandle<JSObject>(),
+ Builtins::kReflectConstruct);
+ if (FLAG_vector_ics) {
+ // Apply embeds an IC, so we need a type vector of size 1 in the shared
+ // function info.
+ FeedbackVectorSpec spec(0, Code::CALL_IC);
+ Handle<TypeFeedbackVector> feedback_vector =
+ factory()->NewTypeFeedbackVector(&spec);
+ apply->shared()->set_feedback_vector(*feedback_vector);
+
+ feedback_vector = factory()->NewTypeFeedbackVector(&spec);
+ construct->shared()->set_feedback_vector(*feedback_vector);
+ }
+
+ apply->shared()->set_internal_formal_parameter_count(3);
+ apply->shared()->set_length(3);
+
+ construct->shared()->set_internal_formal_parameter_count(3);
+ construct->shared()->set_length(2);
+ }
+
+ Handle<JSGlobalObject> global(JSGlobalObject::cast(
+ native_context()->global_object()));
+ Handle<String> reflect_string =
+ factory()->NewStringFromStaticChars("Reflect");
+ Handle<Object> reflect =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ JSObject::AddProperty(global, reflect_string, reflect, DONT_ENUM);
}
@@ -2023,6 +2110,13 @@ bool Genesis::InstallNatives() {
native_context()->set_strict_generator_function_map(
*strict_generator_function_map);
+ Handle<Map> strong_function_map(native_context()->strong_function_map());
+ Handle<Map> strong_generator_function_map =
+ Map::Copy(strong_function_map, "StrongGeneratorFunction");
+ strong_generator_function_map->SetPrototype(generator_function_prototype);
+ native_context()->set_strong_generator_function_map(
+ *strong_generator_function_map);
+
Handle<JSFunction> object_function(native_context()->object_function());
Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
generator_object_prototype_map->SetPrototype(generator_object_prototype);
@@ -2047,14 +2141,12 @@ bool Genesis::InstallNatives() {
}
// Install natives.
- for (int i = Natives::GetDebuggerCount();
- i < Natives::GetBuiltinsCount();
- i++) {
+ int i = Natives::GetDebuggerCount();
+ if (!CompileBuiltin(isolate(), i)) return false;
+ if (!InstallJSBuiltins(builtins)) return false;
+
+ for (++i; i < Natives::GetBuiltinsCount(); ++i) {
if (!CompileBuiltin(isolate(), i)) return false;
- // TODO(ager): We really only need to install the JS builtin
- // functions on the builtins object after compiling and running
- // runtime.js.
- if (!InstallJSBuiltins(builtins)) return false;
}
InstallNativeFunctions();
@@ -2090,10 +2182,9 @@ bool Genesis::InstallNatives() {
if (FLAG_vector_ics) {
// Apply embeds an IC, so we need a type vector of size 1 in the shared
// function info.
- FeedbackVectorSpec spec(0, 1);
- spec.SetKind(0, Code::CALL_IC);
+ FeedbackVectorSpec spec(0, Code::CALL_IC);
Handle<TypeFeedbackVector> feedback_vector =
- factory()->NewTypeFeedbackVector(spec);
+ factory()->NewTypeFeedbackVector(&spec);
apply->shared()->set_feedback_vector(*feedback_vector);
}
@@ -2126,7 +2217,7 @@ bool Genesis::InstallNatives() {
// Add initial map.
Handle<Map> initial_map =
factory()->NewMap(JS_ARRAY_TYPE, JSRegExpResult::kSize);
- initial_map->set_constructor(*array_constructor);
+ initial_map->SetConstructor(*array_constructor);
// Set prototype on map.
initial_map->set_non_instance_prototype(false);
@@ -2213,11 +2304,8 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_array_includes_natives[] = {
"native harmony-array-includes.js", NULL};
static const char* harmony_proxies_natives[] = {"native proxy.js", NULL};
- static const char* harmony_strings_natives[] = {"native harmony-string.js",
- NULL};
static const char* harmony_classes_natives[] = {NULL};
static const char* harmony_modules_natives[] = {NULL};
- static const char* harmony_scoping_natives[] = {NULL};
static const char* harmony_object_literals_natives[] = {NULL};
static const char* harmony_regexps_natives[] = {
"native harmony-regexp.js", NULL};
@@ -2225,13 +2313,13 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_numeric_literals_natives[] = {NULL};
static const char* harmony_tostring_natives[] = {"native harmony-tostring.js",
NULL};
- static const char* harmony_templates_natives[] = {
- "native harmony-templates.js", NULL};
static const char* harmony_sloppy_natives[] = {NULL};
static const char* harmony_unicode_natives[] = {NULL};
static const char* harmony_unicode_regexps_natives[] = {NULL};
static const char* harmony_computed_property_names_natives[] = {NULL};
static const char* harmony_rest_parameters_natives[] = {NULL};
+ static const char* harmony_reflect_natives[] = {"native harmony-reflect.js",
+ NULL};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@@ -2269,15 +2357,24 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder,
void Genesis::InstallBuiltinFunctionIds() {
HandleScope scope(isolate());
+ struct BuiltinFunctionIds {
+ const char* holder_expr;
+ const char* fun_name;
+ BuiltinFunctionId id;
+ };
+
#define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
- { \
- Handle<JSObject> holder = ResolveBuiltinIdHolder( \
- native_context(), #holder_expr); \
- BuiltinFunctionId id = k##name; \
- InstallBuiltinFunctionId(holder, #fun_name, id); \
- }
- FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)
+ { #holder_expr, #fun_name, k##name } \
+ ,
+ const BuiltinFunctionIds builtins[] = {
+ FUNCTIONS_WITH_ID_LIST(INSTALL_BUILTIN_ID)};
#undef INSTALL_BUILTIN_ID
+
+ for (const BuiltinFunctionIds& builtin : builtins) {
+ Handle<JSObject> holder =
+ ResolveBuiltinIdHolder(native_context(), builtin.holder_expr);
+ InstallBuiltinFunctionId(holder, builtin.fun_name, builtin.id);
+ }
}
@@ -2543,15 +2640,7 @@ bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) {
Handle<Object> function_object = Object::GetProperty(
isolate(), builtins, Builtins::GetName(id)).ToHandleChecked();
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
- // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
- // the correct solution is to restore the context register after invoking
- // builtins from full-codegen.
- function->shared()->DisableOptimization(kBuiltinFunctionCannotBeOptimized);
builtins->set_javascript_builtin(id, *function);
- if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
- return false;
- }
- builtins->set_javascript_builtin_code(id, function->shared()->code());
}
return true;
}
@@ -2650,7 +2739,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
DCHECK(!to->HasFastProperties());
// Add to dictionary.
Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+ PropertyCellType::kMutable);
JSObject::SetNormalizedProperty(to, key, callbacks, d);
break;
}
@@ -2674,8 +2764,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
isolate());
DCHECK(!value->IsCell());
if (value->IsPropertyCell()) {
- value = Handle<Object>(PropertyCell::cast(*value)->value(),
- isolate());
+ value = handle(PropertyCell::cast(*value)->value(), isolate());
}
PropertyDetails details = properties->DetailsAt(i);
DCHECK_EQ(kData, details.kind());
@@ -2813,6 +2902,7 @@ Genesis::Genesis(Isolate* isolate,
CreateRoots();
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
+ CreateStrongModeFunctionMaps(empty_function);
Handle<GlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
HookUpGlobalProxy(global_object, global_proxy);
@@ -2827,9 +2917,13 @@ Genesis::Genesis(Isolate* isolate,
isolate->counters()->contexts_created_from_scratch()->Increment();
}
- // Install experimental natives.
- if (!InstallExperimentalNatives()) return;
- InitializeExperimentalGlobal();
+ // Install experimental natives. Do not include them into the snapshot as we
+ // should be able to turn them off at runtime. Re-installing them after
+ // they have already been deserialized would also fail.
+ if (!isolate->serializer_enabled()) {
+ InitializeExperimentalGlobal();
+ if (!InstallExperimentalNatives()) return;
+ }
// The serializer cannot serialize typed arrays. Reset those typed arrays
// for each new context.
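
One refactor in this file is worth isolating: InstallBuiltinFunctionIds changes its X-macro from expanding a statement block per entry to expanding a struct initializer, leaving a plain table that an ordinary range-for walks. The same move in miniature, with a made-up FUNCTION_ID_LIST standing in for FUNCTIONS_WITH_ID_LIST:

    #include <cstdio>

    enum BuiltinFunctionId { kMathFloor, kMathAbs, kStringCharAt };

    // Hypothetical stand-in for FUNCTIONS_WITH_ID_LIST.
    #define FUNCTION_ID_LIST(V)              \
      V(Math, floor, MathFloor)              \
      V(Math, abs, MathAbs)                  \
      V(String.prototype, charAt, StringCharAt)

    struct BuiltinFunctionIds {
      const char* holder_expr;
      const char* fun_name;
      BuiltinFunctionId id;
    };

    // Each list entry becomes one brace-initialized table row.
    #define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \
      {#holder_expr, #fun_name, k##name},
    static const BuiltinFunctionIds kBuiltins[] = {
        FUNCTION_ID_LIST(INSTALL_BUILTIN_ID)};
    #undef INSTALL_BUILTIN_ID

    int main() {
      // The install loop is now ordinary code instead of macro expansion.
      for (const BuiltinFunctionIds& b : kBuiltins) {
        std::printf("install %s.%s -> id %d\n", b.holder_expr, b.fun_name,
                    b.id);
      }
      return 0;
    }
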
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 21c246ca42..2457a956a7 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -1044,6 +1044,17 @@ MUST_USE_RESULT static MaybeHandle<Object> HandleApiCallHelper(
DCHECK(!args[0]->IsNull());
if (args[0]->IsUndefined()) args[0] = function->global_proxy();
+ if (!is_construct && !fun_data->accept_any_receiver()) {
+ Handle<Object> receiver(&args[0]);
+ if (receiver->IsJSObject() && receiver->IsAccessCheckNeeded()) {
+ Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
+ if (!isolate->MayAccess(js_receiver)) {
+ isolate->ReportFailedAccessCheck(js_receiver);
+ RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ }
+ }
+ }
+
Object* raw_holder = fun_data->GetCompatibleReceiver(isolate, args[0]);
if (raw_holder->IsNull()) {
@@ -1185,7 +1196,7 @@ MUST_USE_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
// Get the invocation callback from the function descriptor that was
// used to create the called object.
DCHECK(obj->map()->has_instance_call_handler());
- JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
+ JSFunction* constructor = JSFunction::cast(obj->map()->GetConstructor());
// TODO(ishell): turn this back to a DCHECK.
CHECK(constructor->shared()->IsApiFunction());
Object* handler =
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index cfbb77d7a3..c00a1a92c4 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -109,6 +109,8 @@ enum BuiltinExtraArguments {
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ReflectApply, BUILTIN, UNINITIALIZED, kNoExtraICState) \
+ V(ReflectConstruct, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -193,6 +195,8 @@ enum BuiltinExtraArguments {
V(STRING_ADD_LEFT, 1) \
V(STRING_ADD_RIGHT, 1) \
V(APPLY_PREPARE, 1) \
+ V(REFLECT_APPLY_PREPARE, 1) \
+ V(REFLECT_CONSTRUCT_PREPARE, 2) \
V(STACK_OVERFLOW, 1)
class BuiltinFunctionTable;
@@ -316,6 +320,8 @@ class Builtins {
static void Generate_FunctionCall(MacroAssembler* masm);
static void Generate_FunctionApply(MacroAssembler* masm);
+ static void Generate_ReflectApply(MacroAssembler* masm);
+ static void Generate_ReflectConstruct(MacroAssembler* masm);
static void Generate_InternalArrayCode(MacroAssembler* masm);
static void Generate_ArrayCode(MacroAssembler* masm);
diff --git a/deps/v8/src/char-predicates-inl.h b/deps/v8/src/char-predicates-inl.h
index 71d1b06a92..d90f919341 100644
--- a/deps/v8/src/char-predicates-inl.h
+++ b/deps/v8/src/char-predicates-inl.h
@@ -35,6 +35,13 @@ inline bool IsInRange(int value, int lower_limit, int higher_limit) {
static_cast<unsigned int>(higher_limit - lower_limit);
}
+inline bool IsAsciiIdentifier(uc32 c) {
+ return IsAlphaNumeric(c) || c == '$' || c == '_';
+}
+
+inline bool IsAlphaNumeric(uc32 c) {
+ return IsInRange(AsciiAlphaToLower(c), 'a', 'z') || IsDecimalDigit(c);
+}
inline bool IsDecimalDigit(uc32 c) {
// ECMA-262, 3rd, 7.8.3 (p 16)
diff --git a/deps/v8/src/char-predicates.h b/deps/v8/src/char-predicates.h
index 5ecb07de99..c68ad74b6a 100644
--- a/deps/v8/src/char-predicates.h
+++ b/deps/v8/src/char-predicates.h
@@ -15,6 +15,8 @@ namespace internal {
inline bool IsCarriageReturn(uc32 c);
inline bool IsLineFeed(uc32 c);
+inline bool IsAsciiIdentifier(uc32 c);
+inline bool IsAlphaNumeric(uc32 c);
inline bool IsDecimalDigit(uc32 c);
inline bool IsHexDigit(uc32 c);
inline bool IsOctalDigit(uc32 c);
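
IsAlphaNumeric rests on an ASCII trick: assuming AsciiAlphaToLower is the usual "OR in 0x20" fold (its customary definition in this header), 'A'..'Z' lands on 'a'..'z' while lowercase letters and digits pass through unchanged, so a single range check covers both letter cases. A standalone copy of the predicates with spot checks:

    #include <cassert>

    typedef int uc32;

    inline bool IsInRange(int value, int lower, int higher) {
      // One unsigned compare covers both bounds, as in the header.
      return static_cast<unsigned>(value - lower) <=
             static_cast<unsigned>(higher - lower);
    }

    inline bool IsDecimalDigit(uc32 c) { return IsInRange(c, '0', '9'); }

    // Assumed definition: OR-ing 0x20 folds ASCII upper case onto lower.
    inline int AsciiAlphaToLower(uc32 c) { return c | 0x20; }

    inline bool IsAlphaNumeric(uc32 c) {
      return IsInRange(AsciiAlphaToLower(c), 'a', 'z') || IsDecimalDigit(c);
    }

    inline bool IsAsciiIdentifier(uc32 c) {
      return IsAlphaNumeric(c) || c == '$' || c == '_';
    }

    int main() {
      assert(IsAsciiIdentifier('A') && IsAsciiIdentifier('z'));
      assert(IsAsciiIdentifier('7') && IsAsciiIdentifier('$'));
      assert(IsAsciiIdentifier('_'));
      assert(!IsAsciiIdentifier('-') && !IsAsciiIdentifier(' '));
      return 0;
    }
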
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 590dbbb027..71c5449aaa 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -12,6 +12,15 @@ namespace v8 {
namespace internal {
// static
+Callable CodeFactory::LoadGlobalIC(Isolate* isolate,
+ Handle<GlobalObject> global,
+ Handle<String> name) {
+ return Callable(LoadIC::load_global(isolate, global, name),
+ LoadDescriptor(isolate));
+}
+
+
+// static
Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode) {
return Callable(
LoadIC::initialize_stub(isolate, LoadICState(mode).GetExtraICState()),
@@ -20,14 +29,15 @@ Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode) {
// static
-Callable CodeFactory::LoadICInOptimizedCode(Isolate* isolate,
- ContextualMode mode) {
+Callable CodeFactory::LoadICInOptimizedCode(
+ Isolate* isolate, ContextualMode mode,
+ InlineCacheState initialization_state) {
+ auto code = LoadIC::initialize_stub_in_optimized_code(
+ isolate, LoadICState(mode).GetExtraICState(), initialization_state);
if (FLAG_vector_ics) {
- return Callable(LoadIC::initialize_stub_in_optimized_code(
- isolate, LoadICState(mode).GetExtraICState()),
- VectorLoadICDescriptor(isolate));
+ return Callable(code, VectorLoadICDescriptor(isolate));
}
- return CodeFactory::LoadIC(isolate, mode);
+ return Callable(code, LoadDescriptor(isolate));
}
@@ -39,12 +49,14 @@ Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
// static
-Callable CodeFactory::KeyedLoadICInOptimizedCode(Isolate* isolate) {
+Callable CodeFactory::KeyedLoadICInOptimizedCode(
+ Isolate* isolate, InlineCacheState initialization_state) {
+ auto code = KeyedLoadIC::initialize_stub_in_optimized_code(
+ isolate, initialization_state);
if (FLAG_vector_ics) {
- return Callable(KeyedLoadIC::initialize_stub_in_optimized_code(isolate),
- VectorLoadICDescriptor(isolate));
+ return Callable(code, VectorLoadICDescriptor(isolate));
}
- return CodeFactory::KeyedLoadIC(isolate);
+ return Callable(code, LoadDescriptor(isolate));
}
@@ -67,25 +79,35 @@ Callable CodeFactory::CallICInOptimizedCode(Isolate* isolate, int argc,
// static
Callable CodeFactory::StoreIC(Isolate* isolate, LanguageMode language_mode) {
- return Callable(StoreIC::initialize_stub(isolate, language_mode),
- StoreDescriptor(isolate));
+ return Callable(
+ StoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
+ StoreDescriptor(isolate));
}
// static
Callable CodeFactory::KeyedStoreIC(Isolate* isolate,
LanguageMode language_mode) {
- Handle<Code> ic = is_strict(language_mode)
- ? isolate->builtins()->KeyedStoreIC_Initialize_Strict()
- : isolate->builtins()->KeyedStoreIC_Initialize();
- return Callable(ic, StoreDescriptor(isolate));
+ return Callable(
+ KeyedStoreIC::initialize_stub(isolate, language_mode, UNINITIALIZED),
+ StoreDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::KeyedStoreICInOptimizedCode(
+ Isolate* isolate, LanguageMode language_mode,
+ InlineCacheState initialization_state) {
+ return Callable(KeyedStoreIC::initialize_stub(isolate, language_mode,
+ initialization_state),
+ StoreDescriptor(isolate));
}
// static
Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
Handle<Code> code = CompareIC::GetUninitialized(isolate, op);
- return Callable(code, BinaryOpDescriptor(isolate));
+ return Callable(code, CompareDescriptor(isolate));
}
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 5fd1646d52..f8af73b18e 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -32,16 +32,23 @@ class Callable FINAL BASE_EMBEDDED {
class CodeFactory FINAL {
public:
// Initial states for ICs.
+ static Callable LoadGlobalIC(Isolate* isolate, Handle<GlobalObject> global,
+ Handle<String> name);
static Callable LoadIC(Isolate* isolate, ContextualMode mode);
- static Callable LoadICInOptimizedCode(Isolate* isolate, ContextualMode mode);
+ static Callable LoadICInOptimizedCode(Isolate* isolate, ContextualMode mode,
+ InlineCacheState initialization_state);
static Callable KeyedLoadIC(Isolate* isolate);
- static Callable KeyedLoadICInOptimizedCode(Isolate* isolate);
+ static Callable KeyedLoadICInOptimizedCode(
+ Isolate* isolate, InlineCacheState initialization_state);
static Callable CallIC(Isolate* isolate, int argc,
CallICState::CallType call_type);
static Callable CallICInOptimizedCode(Isolate* isolate, int argc,
CallICState::CallType call_type);
static Callable StoreIC(Isolate* isolate, LanguageMode mode);
static Callable KeyedStoreIC(Isolate* isolate, LanguageMode mode);
+ static Callable KeyedStoreICInOptimizedCode(
+ Isolate* isolate, LanguageMode mode,
+ InlineCacheState initialization_state);
static Callable CompareIC(Isolate* isolate, Token::Value op);
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index f776abc043..8168aac4b8 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -35,7 +35,7 @@ static LChunk* OptimizeGraph(HGraph* graph) {
class CodeStubGraphBuilderBase : public HGraphBuilder {
public:
- explicit CodeStubGraphBuilderBase(CompilationInfoWithZone* info)
+ explicit CodeStubGraphBuilderBase(CompilationInfo* info)
: HGraphBuilder(info),
arguments_length_(NULL),
info_(info),
@@ -100,21 +100,6 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
HValue* shared_info,
HValue* native_context);
- // Tail calls handler found at array[map_index + 1].
- void TailCallHandler(HValue* receiver, HValue* name, HValue* array,
- HValue* map_index, HValue* slot, HValue* vector);
-
- // Tail calls handler_code.
- void TailCallHandler(HValue* receiver, HValue* name, HValue* slot,
- HValue* vector, HValue* handler_code);
-
- void TailCallMiss(HValue* receiver, HValue* name, HValue* slot,
- HValue* vector, bool keyed_load);
-
- // Handle MONOMORPHIC and POLYMORPHIC LoadIC and KeyedLoadIC cases.
- void HandleArrayCases(HValue* array, HValue* receiver, HValue* name,
- HValue* slot, HValue* vector, bool keyed_load);
-
private:
HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -122,7 +107,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
SmartArrayPointer<HParameter*> parameters_;
HValue* arguments_length_;
- CompilationInfoWithZone* info_;
+ CompilationInfo* info_;
CodeStubDescriptor descriptor_;
HContext* context_;
};
@@ -205,7 +190,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
template <class Stub>
class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
public:
- explicit CodeStubGraphBuilder(CompilationInfoWithZone* info)
+ explicit CodeStubGraphBuilder(CompilationInfo* info)
: CodeStubGraphBuilderBase(info) {}
protected:
@@ -287,7 +272,8 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
if (FLAG_profile_hydrogen_code_stub_compilation) {
timer.Start();
}
- CompilationInfoWithZone info(stub, isolate);
+ Zone zone;
+ CompilationInfo info(stub, isolate, &zone);
CodeStubGraphBuilder<Stub> builder(&info);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
Handle<Code> code = chunk->Codegen();
@@ -1354,7 +1340,8 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
StoreGlobalStub::property_cell_placeholder(isolate())));
HValue* cell = Add<HLoadNamedField>(weak_cell, nullptr,
HObjectAccess::ForWeakCellValue());
- HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
+ Add<HCheckHeapObject>(cell);
+ HObjectAccess access = HObjectAccess::ForPropertyCellValue();
HValue* cell_contents = Add<HLoadNamedField>(cell, nullptr, access);
if (stub->is_constant()) {
@@ -1374,8 +1361,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
builder.Then();
builder.Deopt(Deoptimizer::kUnexpectedCellContentsInGlobalStore);
builder.Else();
- HStoreNamedField* store = Add<HStoreNamedField>(cell, access, value);
- store->MarkReceiverAsCell();
+ Add<HStoreNamedField>(cell, access, value);
builder.End();
}
@@ -1726,7 +1712,7 @@ template <>
class CodeStubGraphBuilder<KeyedLoadGenericStub>
: public CodeStubGraphBuilderBase {
public:
- explicit CodeStubGraphBuilder(CompilationInfoWithZone* info)
+ explicit CodeStubGraphBuilder(CompilationInfo* info)
: CodeStubGraphBuilderBase(info) {}
protected:
@@ -2029,211 +2015,6 @@ Handle<Code> KeyedLoadGenericStub::GenerateCode() {
}
-void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
- HValue* array, HValue* map_index,
- HValue* slot, HValue* vector) {
- // The handler is at array[map_index + 1]. Compute this with a custom offset
- // to HLoadKeyed.
- int offset =
- GetDefaultHeaderSizeForElementsKind(FAST_ELEMENTS) + kPointerSize;
- HValue* handler_code = Add<HLoadKeyed>(
- array, map_index, nullptr, FAST_ELEMENTS, NEVER_RETURN_HOLE, offset);
- TailCallHandler(receiver, name, slot, vector, handler_code);
-}
-
-
-void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
- HValue* slot, HValue* vector,
- HValue* handler_code) {
- VectorLoadICDescriptor descriptor(isolate());
- HValue* op_vals[] = {context(), receiver, name, slot, vector};
- Add<HCallWithDescriptor>(handler_code, 0, descriptor,
- Vector<HValue*>(op_vals, 5), TAIL_CALL);
- // We never return here, it is a tail call.
-}
-
-
-void CodeStubGraphBuilderBase::TailCallMiss(HValue* receiver, HValue* name,
- HValue* slot, HValue* vector,
- bool keyed_load) {
- DCHECK(FLAG_vector_ics);
- Add<HTailCallThroughMegamorphicCache>(
- receiver, name, slot, vector,
- HTailCallThroughMegamorphicCache::ComputeFlags(keyed_load, true));
- // We never return here, it is a tail call.
-}
-
-
-void CodeStubGraphBuilderBase::HandleArrayCases(HValue* array, HValue* receiver,
- HValue* name, HValue* slot,
- HValue* vector,
- bool keyed_load) {
- HConstant* constant_two = Add<HConstant>(2);
- HConstant* constant_three = Add<HConstant>(3);
-
- IfBuilder if_receiver_heap_object(this);
- if_receiver_heap_object.IfNot<HIsSmiAndBranch>(receiver);
- if_receiver_heap_object.Then();
- Push(AddLoadMap(receiver, nullptr));
- if_receiver_heap_object.Else();
- HConstant* heap_number_map =
- Add<HConstant>(isolate()->factory()->heap_number_map());
- Push(heap_number_map);
- if_receiver_heap_object.End();
- HValue* receiver_map = Pop();
-
- HValue* start =
- keyed_load ? graph()->GetConstant1() : graph()->GetConstant0();
- HValue* weak_cell =
- Add<HLoadKeyed>(array, start, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
- // Load the weak cell value. It may be Smi(0), or a map. Compare nonetheless
- // against the receiver_map.
- HValue* array_map = Add<HLoadNamedField>(weak_cell, nullptr,
- HObjectAccess::ForWeakCellValue());
-
- IfBuilder if_correct_map(this);
- if_correct_map.If<HCompareObjectEqAndBranch>(receiver_map, array_map);
- if_correct_map.Then();
- { TailCallHandler(receiver, name, array, start, slot, vector); }
- if_correct_map.Else();
- {
- // If our array has more elements, the ic is polymorphic. Look for the
- // receiver map in the rest of the array.
- HValue* length = AddLoadFixedArrayLength(array, nullptr);
- LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
- constant_two);
- start = keyed_load ? constant_three : constant_two;
- HValue* key = builder.BeginBody(start, length, Token::LT);
- {
- HValue* weak_cell = Add<HLoadKeyed>(array, key, nullptr, FAST_ELEMENTS,
- ALLOW_RETURN_HOLE);
- HValue* array_map = Add<HLoadNamedField>(
- weak_cell, nullptr, HObjectAccess::ForWeakCellValue());
- IfBuilder if_correct_poly_map(this);
- if_correct_poly_map.If<HCompareObjectEqAndBranch>(receiver_map,
- array_map);
- if_correct_poly_map.Then();
- { TailCallHandler(receiver, name, array, key, slot, vector); }
- }
- builder.EndBody();
- }
- if_correct_map.End();
-}
-
-
-template <>
-HValue* CodeStubGraphBuilder<VectorLoadStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
- HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
- HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
- HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
-
- // If the feedback is an array, then the IC is in the monomorphic or
- // polymorphic state.
- HValue* feedback =
- Add<HLoadKeyed>(vector, slot, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
- IfBuilder array_checker(this);
- array_checker.If<HCompareMap>(feedback,
- isolate()->factory()->fixed_array_map());
- array_checker.Then();
- { HandleArrayCases(feedback, receiver, name, slot, vector, false); }
- array_checker.Else();
- {
- // Is the IC megamorphic?
- IfBuilder mega_checker(this);
- HConstant* megamorphic_symbol =
- Add<HConstant>(isolate()->factory()->megamorphic_symbol());
- mega_checker.If<HCompareObjectEqAndBranch>(feedback, megamorphic_symbol);
- mega_checker.Then();
- {
- // Probe the stub cache.
- Add<HTailCallThroughMegamorphicCache>(
- receiver, name, slot, vector,
- HTailCallThroughMegamorphicCache::ComputeFlags(false, false));
- }
- mega_checker.End();
- }
- array_checker.End();
-
- TailCallMiss(receiver, name, slot, vector, false);
- return graph()->GetConstant0();
-}
-
-
-Handle<Code> VectorLoadStub::GenerateCode() { return DoGenerateCode(this); }
-
-
-template <>
-HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
- HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
- HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
- HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
- HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
- HConstant* zero = graph()->GetConstant0();
-
- // If the feedback is an array, then the IC is in the monomorphic or
- // polymorphic state.
- HValue* feedback =
- Add<HLoadKeyed>(vector, slot, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
- IfBuilder array_checker(this);
- array_checker.If<HCompareMap>(feedback,
- isolate()->factory()->fixed_array_map());
- array_checker.Then();
- {
- // If feedback[0] is 0, then the IC has element handlers and name should be
- // a smi. If feedback[0] is a string, verify that it matches name.
- HValue* recorded_name = Add<HLoadKeyed>(feedback, zero, nullptr,
- FAST_ELEMENTS, ALLOW_RETURN_HOLE);
-
- IfBuilder recorded_name_is_zero(this);
- recorded_name_is_zero.If<HCompareObjectEqAndBranch>(recorded_name, zero);
- recorded_name_is_zero.Then();
- { Add<HCheckSmi>(name); }
- recorded_name_is_zero.Else();
- {
- IfBuilder strings_match(this);
- strings_match.IfNot<HCompareObjectEqAndBranch>(name, recorded_name);
- strings_match.Then();
- TailCallMiss(receiver, name, slot, vector, true);
- strings_match.End();
- }
- recorded_name_is_zero.End();
-
- HandleArrayCases(feedback, receiver, name, slot, vector, true);
- }
- array_checker.Else();
- {
- // Check if the IC is in megamorphic state.
- IfBuilder megamorphic_checker(this);
- HConstant* megamorphic_symbol =
- Add<HConstant>(isolate()->factory()->megamorphic_symbol());
- megamorphic_checker.If<HCompareObjectEqAndBranch>(feedback,
- megamorphic_symbol);
- megamorphic_checker.Then();
- {
- // Tail-call to the megamorphic KeyedLoadIC, treating it like a handler.
- Handle<Code> stub = KeyedLoadIC::ChooseMegamorphicStub(isolate());
- HValue* constant_stub = Add<HConstant>(stub);
- LoadDescriptor descriptor(isolate());
- HValue* op_vals[] = {context(), receiver, name};
- Add<HCallWithDescriptor>(constant_stub, 0, descriptor,
- Vector<HValue*>(op_vals, 3), TAIL_CALL);
- // We never return here, it is a tail call.
- }
- megamorphic_checker.End();
- }
- array_checker.End();
-
- TailCallMiss(receiver, name, slot, vector, true);
- return zero;
-}
-
-
-Handle<Code> VectorKeyedLoadStub::GenerateCode() {
- return DoGenerateCode(this);
-}
-
-
Handle<Code> MegamorphicLoadStub::GenerateCode() {
return DoGenerateCode(this);
}
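
Throughout this file, CompilationInfoWithZone gives way to a plain CompilationInfo that borrows a caller-owned Zone (see DoGenerateCode above: "Zone zone; CompilationInfo info(stub, isolate, &zone);"), making the arena's lifetime explicit and scoped to the compile. A sketch of that borrow-the-arena shape with toy types:

    #include <cstdio>

    // Toy arena, standing in for v8::internal::Zone.
    struct Zone {
      // ... allocation bookkeeping elided ...
    };

    // After the patch, CompilationInfo borrows a Zone instead of a
    // CompilationInfoWithZone subclass owning one.
    class CompilationInfo {
     public:
      CompilationInfo(const char* name, Zone* zone)
          : name_(name), zone_(zone) {}
      Zone* zone() const { return zone_; }  // borrowed, not owned
      const char* name() const { return name_; }

     private:
      const char* name_;
      Zone* zone_;  // outlives this object by construction
    };

    static void DoGenerateCodeSketch(const char* stub_name) {
      Zone zone;                               // dies at end of scope
      CompilationInfo info(stub_name, &zone);  // mirrors the new call site
      std::printf("compiling %s with a caller-owned zone\n", info.name());
    }

    int main() {
      DoGenerateCodeSketch("FastNewClosureStub");
      return 0;
    }
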
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 6c68271bcd..f600cf316d 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -620,26 +620,6 @@ CallInterfaceDescriptor StoreTransitionStub::GetCallInterfaceDescriptor() {
}
-static void InitializeVectorLoadStub(Isolate* isolate,
- CodeStubDescriptor* descriptor,
- Address deoptimization_handler) {
- DCHECK(FLAG_vector_ics);
- descriptor->Initialize(deoptimization_handler);
-}
-
-
-void VectorLoadStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- InitializeVectorLoadStub(isolate(), descriptor,
- FUNCTION_ADDR(LoadIC_MissFromStubFailure));
-}
-
-
-void VectorKeyedLoadStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- InitializeVectorLoadStub(isolate(), descriptor,
- FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
-}
-
-
void MegamorphicLoadStub::InitializeDescriptor(CodeStubDescriptor* d) {}
@@ -684,7 +664,7 @@ void CreateWeakCellStub::InitializeDescriptor(CodeStubDescriptor* d) {}
void RegExpConstructResultStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
descriptor->Initialize(
- Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
+ Runtime::FunctionForId(Runtime::kRegExpConstructResultRT)->entry);
}
@@ -730,7 +710,7 @@ void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
- descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
+ descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAddRT)->entry);
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 2ae4ba7085..00541133a3 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -85,8 +85,8 @@ namespace internal {
V(StringAdd) \
V(ToBoolean) \
V(TransitionElementsKind) \
- V(VectorKeyedLoad) \
- V(VectorLoad) \
+ V(VectorRawKeyedLoad) \
+ V(VectorRawLoad) \
/* IC Handler stubs */ \
V(LoadConstant) \
V(LoadField) \
@@ -614,7 +614,7 @@ class FastNewClosureStub : public HydrogenCodeStub {
private:
STATIC_ASSERT(LANGUAGE_END == 3);
class LanguageModeBits : public BitField<LanguageMode, 0, 2> {};
- class FunctionKindBits : public BitField<FunctionKind, 2, 7> {};
+ class FunctionKindBits : public BitField<FunctionKind, 2, 8> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
@@ -1777,6 +1777,15 @@ enum ReceiverCheckMode {
};
+enum EmbedMode {
+ // The code being generated is part of an IC handler, which may MISS
+ // to an IC in failure cases.
+ PART_OF_IC_HANDLER,
+
+ NOT_PART_OF_IC_HANDLER
+};
+
+
// Generates code implementing String.prototype.charCodeAt.
//
// Only supports the case when the receiver is a string and the index
@@ -1813,7 +1822,7 @@ class StringCharCodeAtGenerator {
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
+ void GenerateSlow(MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper);
// Skip handling slow case and directly jump to bailout.
@@ -1913,9 +1922,9 @@ class StringCharAtGenerator {
// Generates the slow case code. Must not be naturally
// reachable. Expected to be put after a ret instruction (e.g., in
// deferred code). Always jumps back to the fast case.
- void GenerateSlow(MacroAssembler* masm,
+ void GenerateSlow(MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
- char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_code_at_generator_.GenerateSlow(masm, embed_mode, call_helper);
char_from_code_generator_.GenerateSlow(masm, call_helper);
}
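
For orientation, the EmbedMode parameter threaded through GenerateSlow above is a plain two-value enum selecting between two emission strategies. A minimal standalone sketch of the pattern follows; the names and printed behavior are illustrative only, since the real generators emit machine code through the MacroAssembler:

#include <iostream>

// Stand-in enum mirroring the EmbedMode added above.
enum EmbedModeSketch { PART_OF_IC_HANDLER_SKETCH, NOT_PART_OF_IC_HANDLER_SKETCH };

// The shared slow path can now ask whether it is embedded in an IC
// handler (which may MISS back into the IC machinery) or not.
void GenerateSlowSketch(EmbedModeSketch embed_mode) {
  if (embed_mode == PART_OF_IC_HANDLER_SKETCH) {
    std::cout << "IC-handler variant of the slow path\n";
  } else {
    std::cout << "plain variant of the slow path\n";
  }
}

int main() {
  GenerateSlowSketch(PART_OF_IC_HANDLER_SKETCH);
  GenerateSlowSketch(NOT_PART_OF_IC_HANDLER_SKETCH);
  return 0;
}
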
@@ -2062,38 +2071,49 @@ class MegamorphicLoadStub : public HydrogenCodeStub {
};
-class VectorLoadStub : public HydrogenCodeStub {
+class VectorRawLoadStub : public PlatformCodeStub {
public:
- explicit VectorLoadStub(Isolate* isolate, const LoadICState& state)
- : HydrogenCodeStub(isolate) {
- set_sub_minor_key(state.GetExtraICState());
+ explicit VectorRawLoadStub(Isolate* isolate, const LoadICState& state)
+ : PlatformCodeStub(isolate) {
+ minor_key_ = state.GetExtraICState();
}
- Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
+ void GenerateForTrampoline(MacroAssembler* masm);
- InlineCacheState GetICState() const FINAL { return DEFAULT; }
+ virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
- ExtraICState GetExtraICState() const FINAL {
- return static_cast<ExtraICState>(sub_minor_key());
- }
+ virtual InlineCacheState GetICState() const FINAL OVERRIDE { return DEFAULT; }
- private:
- LoadICState state() const { return LoadICState(GetExtraICState()); }
+ virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+ return static_cast<ExtraICState>(minor_key_);
+ }
DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
- DEFINE_HYDROGEN_CODE_STUB(VectorLoad, HydrogenCodeStub);
+ DEFINE_PLATFORM_CODE_STUB(VectorRawLoad, PlatformCodeStub);
+
+ protected:
+ void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
-class VectorKeyedLoadStub : public VectorLoadStub {
+class VectorRawKeyedLoadStub : public PlatformCodeStub {
public:
- explicit VectorKeyedLoadStub(Isolate* isolate)
- : VectorLoadStub(isolate, LoadICState(0)) {}
+ explicit VectorRawKeyedLoadStub(Isolate* isolate)
+ : PlatformCodeStub(isolate) {}
- Code::Kind GetCodeKind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
+ void GenerateForTrampoline(MacroAssembler* masm);
+
+ virtual Code::Kind GetCodeKind() const OVERRIDE {
+ return Code::KEYED_LOAD_IC;
+ }
+
+ virtual InlineCacheState GetICState() const FINAL OVERRIDE { return DEFAULT; }
DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
- DEFINE_HYDROGEN_CODE_STUB(VectorKeyedLoad, VectorLoadStub);
+ DEFINE_PLATFORM_CODE_STUB(VectorRawKeyedLoad, PlatformCodeStub);
+
+ protected:
+ void GenerateImpl(MacroAssembler* masm, bool in_frame);
};
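
The rewritten stubs expose two entry points over a single body: GenerateImpl takes an in_frame flag so one piece of emission logic serves both the regular stub and its trampoline variant. A standalone sketch of that dispatch; which entry point passes true is an assumption here, since the header alone does not say:

#include <iostream>

// One shared body, parameterized on frame state; illustrative only.
void GenerateImplSketch(bool in_frame) {
  std::cout << (in_frame ? "emit code assuming a frame exists\n"
                         : "emit code that works without a frame\n");
}

// Two thin entry points over the shared body, mirroring the
// Generate/GenerateForTrampoline split declared above.
void GenerateSketch() { GenerateImplSketch(false); }
void GenerateForTrampolineSketch() { GenerateImplSketch(true); }

int main() {
  GenerateSketch();
  GenerateForTrampolineSketch();
  return 0;
}
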
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 178ba4a69d..796e39a50c 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -12,6 +12,7 @@
#include "src/compiler.h"
#include "src/cpu-profiler.h"
#include "src/debug.h"
+#include "src/parser.h"
#include "src/prettyprinter.h"
#include "src/rewriter.h"
#include "src/runtime/runtime.h"
@@ -134,13 +135,13 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
}
#ifdef DEBUG
- if (!info->IsStub() && print_source) {
+ if (info->parse_info() && print_source) {
PrintF("--- Source from AST ---\n%s\n",
PrettyPrinter(info->isolate(), info->zone())
.PrintProgram(info->function()));
}
- if (!info->IsStub() && print_ast) {
+ if (info->parse_info() && print_ast) {
PrintF("--- AST ---\n%s\n", AstPrinter(info->isolate(), info->zone())
.PrintProgram(info->function()));
}
@@ -181,14 +182,27 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
(info->IsStub() && FLAG_print_code_stubs) ||
(info->IsOptimizing() && FLAG_print_opt_code));
if (print_code) {
- // Print the source code if available.
- FunctionLiteral* function = info->function();
- bool print_source = code->kind() == Code::OPTIMIZED_FUNCTION ||
- code->kind() == Code::FUNCTION;
+ const char* debug_name;
+ SmartArrayPointer<char> debug_name_holder;
+ if (info->IsStub()) {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ debug_name = CodeStub::MajorName(major_key, false);
+ } else {
+ debug_name_holder =
+ info->parse_info()->function()->debug_name()->ToCString();
+ debug_name = debug_name_holder.get();
+ }
CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
+
+ // Print the source code if available.
+ FunctionLiteral* function = nullptr;
+ bool print_source =
+ info->parse_info() && (code->kind() == Code::OPTIMIZED_FUNCTION ||
+ code->kind() == Code::FUNCTION);
if (print_source) {
+ function = info->function();
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
os << "--- Raw source ---\n";
@@ -207,10 +221,9 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
}
}
if (info->IsOptimizing()) {
- if (FLAG_print_unopt_code) {
+ if (FLAG_print_unopt_code && info->parse_info()) {
os << "--- Unoptimized code ---\n";
- info->closure()->shared()->code()->Disassemble(
- function->debug_name()->ToCString().get(), os);
+ info->closure()->shared()->code()->Disassemble(debug_name, os);
}
os << "--- Optimized code ---\n"
<< "optimization_id = " << info->optimization_id() << "\n";
@@ -220,12 +233,7 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
if (print_source) {
os << "source_position = " << function->start_position() << "\n";
}
- if (info->IsStub()) {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- code->Disassemble(CodeStub::MajorName(major_key, false), os);
- } else {
- code->Disassemble(function->debug_name()->ToCString().get(), os);
- }
+ code->Disassemble(debug_name, os);
os << "--- End code ---\n";
}
#endif // ENABLE_DISASSEMBLER
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index 94bda70357..c8cf639f97 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -20,26 +20,17 @@ function SetConstructor(iterable) {
throw MakeTypeError('constructor_not_function', ['Set']);
}
- var iter, adder;
+ %_SetInitialize(this);
if (!IS_NULL_OR_UNDEFINED(iterable)) {
- iter = GetIterator(ToObject(iterable));
- adder = this.add;
+ var adder = this.add;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['add', this]);
}
- }
-
- %_SetInitialize(this);
-
- if (IS_UNDEFINED(iter)) return;
- var next, done;
- while (!(next = iter.next()).done) {
- if (!IS_SPEC_OBJECT(next)) {
- throw MakeTypeError('iterator_result_not_an_object', [next]);
+ for (var value of iterable) {
+ %_CallFunction(this, value, adder);
}
- %_CallFunction(this, next.value, adder);
}
}
@@ -160,30 +151,20 @@ function MapConstructor(iterable) {
throw MakeTypeError('constructor_not_function', ['Map']);
}
- var iter, adder;
+ %_MapInitialize(this);
if (!IS_NULL_OR_UNDEFINED(iterable)) {
- iter = GetIterator(ToObject(iterable));
- adder = this.set;
+ var adder = this.set;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['set', this]);
}
- }
-
- %_MapInitialize(this);
- if (IS_UNDEFINED(iter)) return;
-
- var next, done, nextItem;
- while (!(next = iter.next()).done) {
- if (!IS_SPEC_OBJECT(next)) {
- throw MakeTypeError('iterator_result_not_an_object', [next]);
- }
- nextItem = next.value;
- if (!IS_SPEC_OBJECT(nextItem)) {
- throw MakeTypeError('iterator_value_not_an_object', [nextItem]);
+ for (var nextItem of iterable) {
+ if (!IS_SPEC_OBJECT(nextItem)) {
+ throw MakeTypeError('iterator_value_not_an_object', [nextItem]);
+ }
+ %_CallFunction(this, nextItem[0], nextItem[1], adder);
}
- %_CallFunction(this, nextItem[0], nextItem[1], adder);
}
}
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index b696ea5507..f2cb4c9000 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -6,7 +6,6 @@
#include "src/assembler.h"
#include "src/compilation-cache.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 2bad9e69ea..30777361a7 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
-
#include "src/compiler.h"
+#include <algorithm>
+
#include "src/ast-numbering.h"
#include "src/bootstrapper.h"
#include "src/codegen.h"
@@ -28,13 +28,13 @@
#include "src/scanner-character-streams.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
+#include "src/snapshot/serialize.h"
#include "src/typing.h"
#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
-
std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
if (p.IsUnknown()) {
return os << "<?>";
@@ -46,137 +46,65 @@ std::ostream& operator<<(std::ostream& os, const SourcePosition& p) {
}
-ScriptData::ScriptData(const byte* data, int length)
- : owns_data_(false), rejected_(false), data_(data), length_(length) {
- if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
- byte* copy = NewArray<byte>(length);
- DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
- CopyBytes(copy, data, length);
- data_ = copy;
- AcquireDataOwnership();
+#define PARSE_INFO_GETTER(type, name) \
+ type CompilationInfo::name() const { \
+ CHECK(parse_info()); \
+ return parse_info()->name(); \
}
-}
-CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
- : flags_(kThisHasUses),
- script_(script),
- source_stream_(NULL),
- osr_ast_id_(BailoutId::None()),
- parameter_count_(0),
- optimization_id_(-1),
- ast_value_factory_(NULL),
- ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false),
- osr_expr_stack_height_(0) {
- Initialize(script->GetIsolate(), BASE, zone);
-}
+#define PARSE_INFO_GETTER_WITH_DEFAULT(type, name, def) \
+ type CompilationInfo::name() const { \
+ return parse_info() ? parse_info()->name() : def; \
+ }
-CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
- Zone* zone)
- : flags_(kLazy | kThisHasUses),
- shared_info_(shared_info),
- script_(Handle<Script>(Script::cast(shared_info->script()))),
- source_stream_(NULL),
- osr_ast_id_(BailoutId::None()),
- parameter_count_(0),
- optimization_id_(-1),
- ast_value_factory_(NULL),
- ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false),
- osr_expr_stack_height_(0) {
- Initialize(script_->GetIsolate(), BASE, zone);
-}
+PARSE_INFO_GETTER(Handle<Script>, script)
+PARSE_INFO_GETTER(bool, is_eval)
+PARSE_INFO_GETTER(bool, is_native)
+PARSE_INFO_GETTER(bool, is_module)
+PARSE_INFO_GETTER(LanguageMode, language_mode)
+PARSE_INFO_GETTER_WITH_DEFAULT(Handle<JSFunction>, closure,
+ Handle<JSFunction>::null())
+PARSE_INFO_GETTER(FunctionLiteral*, function)
+PARSE_INFO_GETTER_WITH_DEFAULT(Scope*, scope, nullptr)
+PARSE_INFO_GETTER(Handle<Context>, context)
+PARSE_INFO_GETTER(Handle<SharedFunctionInfo>, shared_info)
+#undef PARSE_INFO_GETTER
+#undef PARSE_INFO_GETTER_WITH_DEFAULT
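
The PARSE_INFO_GETTER macros above generate forwarding getters: the plain form checks that a ParseInfo is attached, and the _WITH_DEFAULT form falls back to a supplied value when none is. A self-contained sketch of the same macro technique, with illustrative stand-in types:

#include <cassert>

// Fake ParseInfo carrying the data; names are illustrative.
struct ParseInfoSketch {
  bool is_eval() const { return true; }
  int line() const { return 42; }
};

class InfoSketch {
 public:
  explicit InfoSketch(ParseInfoSketch* p) : parse_info_(p) {}
  ParseInfoSketch* parse_info() const { return parse_info_; }

// Forwarding getter that requires an attached ParseInfo.
#define FORWARD_GETTER(type, name) \
  type name() const {              \
    assert(parse_info());          \
    return parse_info()->name();   \
  }

// Forwarding getter with a fallback when no ParseInfo is attached.
#define FORWARD_GETTER_WITH_DEFAULT(type, name, def) \
  type name() const { return parse_info() ? parse_info()->name() : def; }

  FORWARD_GETTER(bool, is_eval)
  FORWARD_GETTER_WITH_DEFAULT(int, line, -1)
#undef FORWARD_GETTER
#undef FORWARD_GETTER_WITH_DEFAULT

 private:
  ParseInfoSketch* parse_info_;
};

int main() {
  ParseInfoSketch p;
  InfoSketch with(&p), without(nullptr);
  assert(with.is_eval() && with.line() == 42);
  assert(without.line() == -1);  // default kicks in
  return 0;
}
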
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
- : flags_(kLazy | kThisHasUses),
- closure_(closure),
- shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
- script_(Handle<Script>(Script::cast(shared_info_->script()))),
- source_stream_(NULL),
- context_(closure->context()),
- osr_ast_id_(BailoutId::None()),
- parameter_count_(0),
- optimization_id_(-1),
- ast_value_factory_(NULL),
- ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false),
- osr_expr_stack_height_(0) {
- Initialize(script_->GetIsolate(), BASE, zone);
-}
+// Exactly like a CompilationInfo, except that it is allocated via {new} and
+// also creates and enters a Zone on construction and deallocates it on
+// destruction.
+class CompilationInfoWithZone : public CompilationInfo {
+ public:
+ explicit CompilationInfoWithZone(Handle<JSFunction> function)
+ : CompilationInfo(new ParseInfo(&zone_, function)) {}
-CompilationInfo::CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone)
- : flags_(kLazy | kThisHasUses),
- source_stream_(NULL),
- osr_ast_id_(BailoutId::None()),
- parameter_count_(0),
- optimization_id_(-1),
- ast_value_factory_(NULL),
- ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false),
- osr_expr_stack_height_(0) {
- Initialize(isolate, STUB, zone);
- code_stub_ = stub;
-}
+  // Virtual destructor because a CompilationInfoWithZone has to exit the
+  // zone scope and get rid of dependent maps even when the destructor is
+  // invoked through a pointer to the base CompilationInfo.
+ virtual ~CompilationInfoWithZone() {
+ DisableFutureOptimization();
+ RollbackDependencies();
+ delete parse_info_;
+ parse_info_ = nullptr;
+ }
+ private:
+ Zone zone_;
+};
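
The subclass owns the zone and the heap-allocated ParseInfo that its base class merely borrows, so a stack-allocated instance cleans up both via RAII. A stripped-down sketch of the ownership pattern, with stand-in types; as in the diff above, the member pointer handed to the base constructor is only stored, never dereferenced during construction:

#include <memory>

// Stand-in types; the real Zone/ParseInfo carry much more state.
struct ZoneSketch {};
struct ParseInfoSketch {
  explicit ParseInfoSketch(ZoneSketch* z) : zone(z) {}  // only stores it
  ZoneSketch* zone;
};

class InfoBaseSketch {
 public:
  explicit InfoBaseSketch(ParseInfoSketch* parse_info)
      : parse_info_(parse_info) {}
  virtual ~InfoBaseSketch() = default;  // virtual: deleted via base pointers
 protected:
  ParseInfoSketch* parse_info_;
};

class InfoWithZoneSketch : public InfoBaseSketch {
 public:
  // &zone_ is taken before zone_ is constructed; safe because the
  // pointer is only stored until after construction completes.
  InfoWithZoneSketch() : InfoBaseSketch(new ParseInfoSketch(&zone_)) {}
  ~InfoWithZoneSketch() override {
    delete parse_info_;
    parse_info_ = nullptr;
  }

 private:
  ZoneSketch zone_;
};

int main() {
  // Cleanup runs correctly even through a base-class pointer.
  std::unique_ptr<InfoBaseSketch> info(new InfoWithZoneSketch());
  return 0;
}
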
-CompilationInfo::CompilationInfo(
- ScriptCompiler::ExternalSourceStream* stream,
- ScriptCompiler::StreamedSource::Encoding encoding, Isolate* isolate,
- Zone* zone)
- : flags_(kThisHasUses),
- source_stream_(stream),
- source_stream_encoding_(encoding),
- osr_ast_id_(BailoutId::None()),
- parameter_count_(0),
- optimization_id_(-1),
- ast_value_factory_(NULL),
- ast_value_factory_owned_(false),
- aborted_due_to_dependency_change_(false),
- osr_expr_stack_height_(0) {
- Initialize(isolate, BASE, zone);
+
+bool CompilationInfo::has_shared_info() const {
+ return parse_info_ && !parse_info_->shared_info().is_null();
}
-void CompilationInfo::Initialize(Isolate* isolate,
- Mode mode,
- Zone* zone) {
- isolate_ = isolate;
- function_ = NULL;
- scope_ = NULL;
- script_scope_ = NULL;
- extension_ = NULL;
- cached_data_ = NULL;
- compile_options_ = ScriptCompiler::kNoCompileOptions;
- zone_ = zone;
- deferred_handles_ = NULL;
- code_stub_ = NULL;
- prologue_offset_ = Code::kPrologueOffsetNotSet;
- opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count();
- no_frame_ranges_ = isolate->cpu_profiler()->is_profiling()
- ? new List<OffsetRange>(2) : NULL;
- if (FLAG_hydrogen_track_positions) {
- inlined_function_infos_ = new List<InlinedFunctionInfo>(5);
- inlining_id_to_function_id_ = new List<int>(5);
- } else {
- inlined_function_infos_ = NULL;
- inlining_id_to_function_id_ = NULL;
- }
-
- for (int i = 0; i < DependentCode::kGroupCount; i++) {
- dependencies_[i] = NULL;
- }
- if (mode == STUB) {
- mode_ = STUB;
- return;
- }
- mode_ = mode;
- if (!script_.is_null() && script_->type()->value() == Script::TYPE_NATIVE) {
- MarkAsNative();
- }
+CompilationInfo::CompilationInfo(ParseInfo* parse_info)
+ : CompilationInfo(parse_info, nullptr, BASE, parse_info->isolate(),
+ parse_info->zone()) {
// Compiling for the snapshot typically results in different code than
// compiling later on. This means that code recompiled with deoptimization
// support won't be "equivalent" (as defined by SharedFunctionInfo::
@@ -187,34 +115,54 @@ void CompilationInfo::Initialize(Isolate* isolate,
if (isolate_->debug()->is_active()) MarkAsDebug();
if (FLAG_context_specialization) MarkAsContextSpecializing();
+ if (FLAG_turbo_builtin_inlining) MarkAsBuiltinInliningEnabled();
if (FLAG_turbo_inlining) MarkAsInliningEnabled();
if (FLAG_turbo_splitting) MarkAsSplittingEnabled();
if (FLAG_turbo_types) MarkAsTypingEnabled();
- if (!shared_info_.is_null()) {
- DCHECK(is_sloppy(language_mode()));
- SetLanguageMode(shared_info_->language_mode());
- }
- bailout_reason_ = kNoReason;
-
- if (!shared_info().is_null() && shared_info()->is_compiled()) {
+ if (has_shared_info() && shared_info()->is_compiled()) {
// We should initialize the CompilationInfo feedback vector from the
// passed in shared info, rather than creating a new one.
- feedback_vector_ =
- Handle<TypeFeedbackVector>(shared_info()->feedback_vector(), isolate);
+ feedback_vector_ = Handle<TypeFeedbackVector>(
+ shared_info()->feedback_vector(), parse_info->isolate());
}
}
+CompilationInfo::CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone)
+ : CompilationInfo(nullptr, stub, STUB, isolate, zone) {}
+
+
+CompilationInfo::CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub,
+ Mode mode, Isolate* isolate, Zone* zone)
+ : parse_info_(parse_info),
+ isolate_(isolate),
+ flags_(0),
+ code_stub_(code_stub),
+ mode_(mode),
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(nullptr),
+ bailout_reason_(kNoReason),
+ prologue_offset_(Code::kPrologueOffsetNotSet),
+ no_frame_ranges_(isolate->cpu_profiler()->is_profiling()
+ ? new List<OffsetRange>(2)
+ : nullptr),
+ track_positions_(FLAG_hydrogen_track_positions ||
+ isolate->cpu_profiler()->is_profiling()),
+ opt_count_(has_shared_info() ? shared_info()->opt_count() : 0),
+ parameter_count_(0),
+ optimization_id_(-1),
+ aborted_due_to_dependency_change_(false),
+ osr_expr_stack_height_(0) {
+ std::fill_n(dependencies_, DependentCode::kGroupCount, nullptr);
+}
+
+
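
The several bespoke constructors collapse into thin C++11 delegating constructors over one primary constructor, whose member-init list replaces the old Initialize() helper; std::fill_n clears the dependency array, which is why <algorithm> is now included at the top of compiler.cc. A minimal sketch of the shape, with illustrative members:

#include <algorithm>

class ConsolidatedSketch {
 public:
  // Thin delegating constructors (C++11), mirroring the stub and
  // parse-info forms above.
  ConsolidatedSketch() : ConsolidatedSketch(0, nullptr) {}
  explicit ConsolidatedSketch(int mode) : ConsolidatedSketch(mode, nullptr) {}

  int mode() const { return mode_; }

 private:
  // The single primary constructor does all real initialization.
  ConsolidatedSketch(int mode, void* stub) : mode_(mode), stub_(stub) {
    std::fill_n(deps_, kGroupCount, nullptr);  // clear the whole array
  }

  static const int kGroupCount = 7;  // illustrative count
  int mode_;
  void* stub_;
  void* deps_[kGroupCount];
};

int main() {
  ConsolidatedSketch a;
  ConsolidatedSketch b(1);
  return a.mode() + b.mode() - 1;
}
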
CompilationInfo::~CompilationInfo() {
- if (GetFlag(kDisableFutureOptimization)) {
- shared_info()->DisableOptimization(bailout_reason());
- }
+ DisableFutureOptimization();
delete deferred_handles_;
delete no_frame_ranges_;
- delete inlined_function_infos_;
- delete inlining_id_to_function_id_;
- if (ast_value_factory_owned_) delete ast_value_factory_;
#ifdef DEBUG
// Check that no dependent maps have been added or added dependent maps have
// been rolled back or committed.
@@ -273,33 +221,21 @@ void CompilationInfo::RollbackDependencies() {
int CompilationInfo::num_parameters() const {
- if (IsStub()) {
- DCHECK(parameter_count_ > 0);
- return parameter_count_;
- } else {
- return scope()->num_parameters();
- }
+ return has_scope() ? scope()->num_parameters() : parameter_count_;
}
int CompilationInfo::num_heap_slots() const {
- if (IsStub()) {
- return 0;
- } else {
- return scope()->num_heap_slots();
- }
+ return has_scope() ? scope()->num_heap_slots() : 0;
}
Code::Flags CompilationInfo::flags() const {
- if (IsStub()) {
- return Code::ComputeFlags(code_stub()->GetCodeKind(),
- code_stub()->GetICState(),
- code_stub()->GetExtraICState(),
- code_stub()->GetStubType());
- } else {
- return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
- }
+ return code_stub() != nullptr
+ ? Code::ComputeFlags(
+ code_stub()->GetCodeKind(), code_stub()->GetICState(),
+ code_stub()->GetExtraICState(), code_stub()->GetStubType())
+ : Code::ComputeFlags(Code::OPTIMIZED_FUNCTION);
}
@@ -307,17 +243,10 @@ Code::Flags CompilationInfo::flags() const {
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_crankshaft &&
- !function()->flags()->Contains(kDontSelfOptimize) &&
- !function()->dont_optimize() &&
- function()->scope()->AllowsLazyCompilation() &&
- (shared_info().is_null() || !shared_info()->optimization_disabled());
-}
-
-
-void CompilationInfo::PrepareForCompilation(Scope* scope) {
- DCHECK(scope_ == NULL);
- scope_ = scope;
+ return FLAG_crankshaft && !function()->flags()->Contains(kDontSelfOptimize) &&
+ !function()->dont_optimize() &&
+ function()->scope()->AllowsLazyCompilation() &&
+ (!has_shared_info() || !shared_info()->optimization_disabled());
}
@@ -330,87 +259,95 @@ void CompilationInfo::EnsureFeedbackVector() {
bool CompilationInfo::is_simple_parameter_list() {
- return scope_->is_simple_parameter_list();
+ return scope()->is_simple_parameter_list();
}
int CompilationInfo::TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
- SourcePosition position) {
- if (!FLAG_hydrogen_track_positions) {
- return 0;
- }
-
- DCHECK(inlined_function_infos_);
- DCHECK(inlining_id_to_function_id_);
- int id = 0;
- for (; id < inlined_function_infos_->length(); id++) {
- if (inlined_function_infos_->at(id).shared().is_identical_to(shared)) {
- break;
- }
- }
- if (id == inlined_function_infos_->length()) {
- inlined_function_infos_->Add(InlinedFunctionInfo(shared));
-
- if (!shared->script()->IsUndefined()) {
- Handle<Script> script(Script::cast(shared->script()));
- if (!script->source()->IsUndefined()) {
- CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
- OFStream os(tracing_scope.file());
- os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
- << ") id{" << optimization_id() << "," << id << "} ---\n";
- {
- DisallowHeapAllocation no_allocation;
- int start = shared->start_position();
- int len = shared->end_position() - start;
- String::SubStringRange source(String::cast(script->source()), start,
- len);
- for (const auto& c : source) {
- os << AsReversiblyEscapedUC16(c);
- }
+ SourcePosition position,
+ int parent_id) {
+ DCHECK(track_positions_);
+
+ int inline_id = static_cast<int>(inlined_function_infos_.size());
+ InlinedFunctionInfo info(parent_id, position, UnboundScript::kNoScriptId,
+ shared->start_position());
+ if (!shared->script()->IsUndefined()) {
+ Handle<Script> script(Script::cast(shared->script()));
+ info.script_id = script->id()->value();
+
+ if (FLAG_hydrogen_track_positions && !script->source()->IsUndefined()) {
+ CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+ OFStream os(tracing_scope.file());
+ os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
+ << ") id{" << optimization_id() << "," << inline_id << "} ---\n";
+ {
+ DisallowHeapAllocation no_allocation;
+ int start = shared->start_position();
+ int len = shared->end_position() - start;
+ String::SubStringRange source(String::cast(script->source()), start,
+ len);
+ for (const auto& c : source) {
+ os << AsReversiblyEscapedUC16(c);
}
-
- os << "\n--- END ---\n";
}
+
+ os << "\n--- END ---\n";
}
}
- int inline_id = inlining_id_to_function_id_->length();
- inlining_id_to_function_id_->Add(id);
+ inlined_function_infos_.push_back(info);
- if (inline_id != 0) {
+ if (FLAG_hydrogen_track_positions && inline_id != 0) {
CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
OFStream os(tracing_scope.file());
os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
- << optimization_id() << "," << id << "} AS " << inline_id << " AT "
- << position << std::endl;
+ << optimization_id() << "," << inline_id << "} AS " << inline_id
+ << " AT " << position << std::endl;
}
return inline_id;
}
+void CompilationInfo::LogDeoptCallPosition(int pc_offset, int inlining_id) {
+ if (!track_positions_ || IsStub()) return;
+ DCHECK_LT(static_cast<size_t>(inlining_id), inlined_function_infos_.size());
+ inlined_function_infos_.at(inlining_id).deopt_pc_offsets.push_back(pc_offset);
+}
+
+
class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
public:
explicit HOptimizedGraphBuilderWithPositions(CompilationInfo* info)
: HOptimizedGraphBuilder(info) {
}
-#define DEF_VISIT(type) \
- void Visit##type(type* node) OVERRIDE { \
- if (node->position() != RelocInfo::kNoPosition) { \
- SetSourcePosition(node->position()); \
- } \
- HOptimizedGraphBuilder::Visit##type(node); \
+#define DEF_VISIT(type) \
+ void Visit##type(type* node) OVERRIDE { \
+ SourcePosition old_position = SourcePosition::Unknown(); \
+ if (node->position() != RelocInfo::kNoPosition) { \
+ old_position = source_position(); \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ if (!old_position.IsUnknown()) { \
+ set_source_position(old_position); \
+ } \
}
EXPRESSION_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
-#define DEF_VISIT(type) \
- void Visit##type(type* node) OVERRIDE { \
- if (node->position() != RelocInfo::kNoPosition) { \
- SetSourcePosition(node->position()); \
- } \
- HOptimizedGraphBuilder::Visit##type(node); \
+#define DEF_VISIT(type) \
+ void Visit##type(type* node) OVERRIDE { \
+ SourcePosition old_position = SourcePosition::Unknown(); \
+ if (node->position() != RelocInfo::kNoPosition) { \
+ old_position = source_position(); \
+ SetSourcePosition(node->position()); \
+ } \
+ HOptimizedGraphBuilder::Visit##type(node); \
+ if (!old_position.IsUnknown()) { \
+ set_source_position(old_position); \
+ } \
}
STATEMENT_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
@@ -501,6 +438,13 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
if (info()->is_osr()) os << " OSR";
os << "]" << std::endl;
}
+
+ if (info()->shared_info()->asm_function()) {
+ info()->MarkAsContextSpecializing();
+ } else if (FLAG_turbo_type_feedback) {
+ info()->MarkAsTypeFeedbackEnabled();
+ }
+
Timer t(this, &time_taken_to_create_graph_);
compiler::Pipeline pipeline(info());
pipeline.GenerateCode();
@@ -509,6 +453,9 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
}
}
+ // Do not use Crankshaft if the code is intended to be serialized.
+ if (!isolate()->use_crankshaft()) return SetLastStatus(FAILED);
+
if (FLAG_trace_opt) {
OFStream os(stdout);
os << "[compiling method " << Brief(*info()->closure())
@@ -531,12 +478,12 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
info()->shared_info()->disable_optimization_reason());
}
- graph_builder_ = (FLAG_hydrogen_track_positions || FLAG_trace_ic)
- ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
- : new(info()->zone()) HOptimizedGraphBuilder(info());
+ graph_builder_ = (info()->is_tracking_positions() || FLAG_trace_ic)
+ ? new (info()->zone())
+ HOptimizedGraphBuilderWithPositions(info())
+ : new (info()->zone()) HOptimizedGraphBuilder(info());
Timer t(this, &time_taken_to_create_graph_);
- info()->set_this_has_uses(false);
graph_ = graph_builder_->CreateGraph();
if (isolate()->has_pending_exception()) {
@@ -586,7 +533,8 @@ OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
// TODO(turbofan): Currently everything is done in the first phase.
if (!info()->code().is_null()) {
if (FLAG_turbo_deoptimization) {
- info()->context()->native_context()->AddOptimizedCode(*info()->code());
+ info()->parse_info()->context()->native_context()->AddOptimizedCode(
+ *info()->code());
}
RecordOptimizationStats();
return last_status();
@@ -698,7 +646,7 @@ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// enabled as finding the line number is not free.
if (info->isolate()->logger()->is_logging_code_events() ||
info->isolate()->cpu_profiler()->is_profiling()) {
- Handle<Script> script = info->script();
+ Handle<Script> script = info->parse_info()->script();
Handle<Code> code = info->code();
if (code.is_identical_to(info->isolate()->builtins()->CompileLazy())) {
return;
@@ -714,16 +662,13 @@ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
CodeCreateEvent(log_tag, *code, *shared, info, script_name,
line_num, column_num));
}
-
- GDBJIT(AddCode(Handle<String>(shared->DebugName()),
- Handle<Script>(info->script()), Handle<Code>(info->code()),
- info));
}
static bool CompileUnoptimizedCode(CompilationInfo* info) {
DCHECK(AllowCompilation::IsAllowed(info->isolate()));
- if (!Compiler::Analyze(info) || !FullCodeGenerator::MakeCode(info)) {
+ if (!Compiler::Analyze(info->parse_info()) ||
+ !FullCodeGenerator::MakeCode(info)) {
Isolate* isolate = info->isolate();
if (!isolate->has_pending_exception()) isolate->StackOverflow();
return false;
@@ -738,7 +683,7 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
PostponeInterruptsScope postpone(info->isolate());
// Parse and update CompilationInfo with the results.
- if (!Parser::ParseStatic(info)) return MaybeHandle<Code>();
+ if (!Parser::ParseStatic(info->parse_info())) return MaybeHandle<Code>();
Handle<SharedFunctionInfo> shared = info->shared_info();
FunctionLiteral* lit = info->function();
shared->set_language_mode(lit->language_mode());
@@ -814,22 +759,23 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
}
-static bool Renumber(CompilationInfo* info) {
- if (!AstNumbering::Renumber(info->isolate(), info->zone(),
- info->function())) {
+static bool Renumber(ParseInfo* parse_info) {
+ if (!AstNumbering::Renumber(parse_info->isolate(), parse_info->zone(),
+ parse_info->function())) {
return false;
}
- if (!info->shared_info().is_null()) {
- FunctionLiteral* lit = info->function();
- info->shared_info()->set_ast_node_count(lit->ast_node_count());
- MaybeDisableOptimization(info->shared_info(), lit->dont_optimize_reason());
- info->shared_info()->set_dont_cache(lit->flags()->Contains(kDontCache));
+ Handle<SharedFunctionInfo> shared_info = parse_info->shared_info();
+ if (!shared_info.is_null()) {
+ FunctionLiteral* lit = parse_info->function();
+ shared_info->set_ast_node_count(lit->ast_node_count());
+ MaybeDisableOptimization(shared_info, lit->dont_optimize_reason());
+ shared_info->set_dont_cache(lit->flags()->Contains(kDontCache));
}
return true;
}
-bool Compiler::Analyze(CompilationInfo* info) {
+bool Compiler::Analyze(ParseInfo* info) {
DCHECK(info->function() != NULL);
if (!Rewriter::Rewrite(info)) return false;
if (!Scope::Analyze(info)) return false;
@@ -839,14 +785,14 @@ bool Compiler::Analyze(CompilationInfo* info) {
}
-bool Compiler::ParseAndAnalyze(CompilationInfo* info) {
+bool Compiler::ParseAndAnalyze(ParseInfo* info) {
if (!Parser::ParseStatic(info)) return false;
return Compiler::Analyze(info);
}
static bool GetOptimizedCodeNow(CompilationInfo* info) {
- if (!Compiler::ParseAndAnalyze(info)) return false;
+ if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
@@ -883,8 +829,11 @@ static bool GetOptimizedCodeLater(CompilationInfo* info) {
}
CompilationHandleScope handle_scope(info);
- if (!Compiler::ParseAndAnalyze(info)) return false;
- info->SaveHandles(); // Copy handles to the compilation handle scope.
+ if (!Compiler::ParseAndAnalyze(info->parse_info())) return false;
+
+ // Reopen handles in the new CompilationHandleScope.
+ info->ReopenHandlesInNewHandleScope();
+ info->parse_info()->ReopenHandlesInNewHandleScope();
TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
@@ -930,14 +879,14 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
// If the debugger is active, do not compile with turbofan unless we can
// deopt from turbofan code.
if (FLAG_turbo_asm && function->shared()->asm_function() &&
- (FLAG_turbo_deoptimization || !isolate->debug()->is_active())) {
+ (FLAG_turbo_deoptimization || !isolate->debug()->is_active()) &&
+ !FLAG_turbo_osr) {
CompilationInfoWithZone info(function);
VMState<COMPILER> state(isolate);
PostponeInterruptsScope postpone(isolate);
info.SetOptimizing(BailoutId::None(), handle(function->shared()->code()));
- info.MarkAsContextSpecializing();
if (GetOptimizedCodeNow(&info)) {
DCHECK(function->shared()->is_compiled());
@@ -957,7 +906,7 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
ASSIGN_RETURN_ON_EXCEPTION(isolate, result, GetUnoptimizedCodeCommon(&info),
Code);
- if (FLAG_always_opt && isolate->use_crankshaft()) {
+ if (FLAG_always_opt) {
Handle<Code> opt_code;
if (Compiler::GetOptimizedCode(
function, result,
@@ -975,7 +924,9 @@ MaybeHandle<Code> Compiler::GetUnoptimizedCode(
DCHECK(!shared->GetIsolate()->has_pending_exception());
DCHECK(!shared->is_compiled());
- CompilationInfoWithZone info(shared);
+ Zone zone;
+ ParseInfo parse_info(&zone, shared);
+ CompilationInfo info(&parse_info);
return GetUnoptimizedCodeCommon(&info);
}
@@ -1002,14 +953,16 @@ bool Compiler::EnsureCompiled(Handle<JSFunction> function,
bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
DCHECK(info->function() != NULL);
DCHECK(info->scope() != NULL);
- if (!info->shared_info()->has_deoptimization_support()) {
- Handle<SharedFunctionInfo> shared = info->shared_info();
- CompilationInfoWithZone unoptimized(shared);
+ Handle<SharedFunctionInfo> shared = info->shared_info();
+ if (!shared->has_deoptimization_support()) {
+ // TODO(titzer): just reuse the ParseInfo for the unoptimized compile.
+ CompilationInfoWithZone unoptimized(info->closure());
// Note that we use the same AST that we will use for generating the
// optimized code.
- unoptimized.SetFunction(info->function());
- unoptimized.PrepareForCompilation(info->scope());
- unoptimized.SetContext(info->context());
+ ParseInfo* parse_info = unoptimized.parse_info();
+ parse_info->set_literal(info->function());
+ parse_info->set_scope(info->scope());
+ parse_info->set_context(info->context());
unoptimized.EnableDeoptimizationSupport();
// If the current code has reloc info for serialization, also include
// reloc info for serialization for the new code, so that deopt support
@@ -1079,16 +1032,18 @@ MaybeHandle<Code> Compiler::GetDebugCode(Handle<JSFunction> function) {
void Compiler::CompileForLiveEdit(Handle<Script> script) {
// TODO(635): support extensions.
- CompilationInfoWithZone info(script);
+ Zone zone;
+ ParseInfo parse_info(&zone, script);
+ CompilationInfo info(&parse_info);
PostponeInterruptsScope postpone(info.isolate());
VMState<COMPILER> state(info.isolate());
- info.MarkAsGlobal();
- if (!Parser::ParseStatic(&info)) return;
+ info.parse_info()->set_global();
+ if (!Parser::ParseStatic(info.parse_info())) return;
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (!CompileUnoptimizedCode(&info)) return;
- if (!info.shared_info().is_null()) {
+ if (info.has_shared_info()) {
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info.isolate(), info.zone(), info.scope());
info.shared_info()->set_scope_info(*scope_info);
@@ -1101,40 +1056,44 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
Isolate* isolate = info->isolate();
PostponeInterruptsScope postpone(isolate);
DCHECK(!isolate->native_context().is_null());
- Handle<Script> script = info->script();
+ ParseInfo* parse_info = info->parse_info();
+ Handle<Script> script = parse_info->script();
// TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
FixedArray* array = isolate->native_context()->embedder_data();
- script->set_context_data(array->get(0));
+ script->set_context_data(array->get(v8::Context::kDebugIdIndex));
isolate->debug()->OnBeforeCompile(script);
- DCHECK(info->is_eval() || info->is_global() || info->is_module());
+ DCHECK(parse_info->is_eval() || parse_info->is_global() ||
+ parse_info->is_module());
- info->MarkAsToplevel();
+ parse_info->set_toplevel();
Handle<SharedFunctionInfo> result;
{ VMState<COMPILER> state(info->isolate());
- if (info->function() == NULL) {
+ if (parse_info->literal() == NULL) {
       // Parse the script if needed (if it's already parsed, literal() is
       // non-NULL).
- bool parse_allow_lazy =
- (info->compile_options() == ScriptCompiler::kConsumeParserCache ||
- String::cast(script->source())->length() >
- FLAG_min_preparse_length) &&
- !Compiler::DebuggerWantsEagerCompilation(info);
+ ScriptCompiler::CompileOptions options = parse_info->compile_options();
+ bool parse_allow_lazy = (options == ScriptCompiler::kConsumeParserCache ||
+ String::cast(script->source())->length() >
+ FLAG_min_preparse_length) &&
+ !Compiler::DebuggerWantsEagerCompilation(isolate);
+ parse_info->set_allow_lazy_parsing(parse_allow_lazy);
if (!parse_allow_lazy &&
- (info->compile_options() == ScriptCompiler::kProduceParserCache ||
- info->compile_options() == ScriptCompiler::kConsumeParserCache)) {
+ (options == ScriptCompiler::kProduceParserCache ||
+ options == ScriptCompiler::kConsumeParserCache)) {
// We are going to parse eagerly, but we either 1) have cached data
// produced by lazy parsing or 2) are asked to generate cached data.
// Eager parsing cannot benefit from cached data, and producing cached
// data while parsing eagerly is not implemented.
- info->SetCachedData(NULL, ScriptCompiler::kNoCompileOptions);
+ parse_info->set_cached_data(nullptr);
+ parse_info->set_compile_options(ScriptCompiler::kNoCompileOptions);
}
- if (!Parser::ParseStatic(info, parse_allow_lazy)) {
+ if (!Parser::ParseStatic(parse_info)) {
return Handle<SharedFunctionInfo>::null();
}
}
@@ -1177,7 +1136,6 @@ static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
PROFILE(isolate, CodeCreateEvent(
log_tag, *info->code(), *result, info, *script_name));
- GDBJIT(AddCode(script_name, script, info->code(), info));
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
@@ -1214,12 +1172,14 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
if (!maybe_shared_info.ToHandle(&shared_info)) {
Handle<Script> script = isolate->factory()->NewScript(source);
- CompilationInfoWithZone info(script);
- info.MarkAsEval();
- if (context->IsNativeContext()) info.MarkAsGlobal();
- info.SetLanguageMode(language_mode);
- info.SetParseRestriction(restriction);
- info.SetContext(context);
+ Zone zone;
+ ParseInfo parse_info(&zone, script);
+ CompilationInfo info(&parse_info);
+ parse_info.set_eval();
+ if (context->IsNativeContext()) parse_info.set_global();
+ parse_info.set_language_mode(language_mode);
+ parse_info.set_parse_restriction(restriction);
+ parse_info.set_context(context);
Debug::RecordEvalCaller(script);
@@ -1254,8 +1214,8 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
Handle<SharedFunctionInfo> Compiler::CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, bool is_embedder_debug_script,
- bool is_shared_cross_origin, Handle<Context> context,
- v8::Extension* extension, ScriptData** cached_data,
+ bool is_shared_cross_origin, Handle<Object> source_map_url,
+ Handle<Context> context, v8::Extension* extension, ScriptData** cached_data,
ScriptCompiler::CompileOptions compile_options, NativesFlag natives,
bool is_module) {
Isolate* isolate = source->GetIsolate();
@@ -1331,23 +1291,31 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
}
script->set_is_shared_cross_origin(is_shared_cross_origin);
script->set_is_embedder_debug_script(is_embedder_debug_script);
+ if (!source_map_url.is_null()) {
+ script->set_source_mapping_url(*source_map_url);
+ }
// Compile the function and add it to the cache.
- CompilationInfoWithZone info(script);
+ Zone zone;
+ ParseInfo parse_info(&zone, script);
+ CompilationInfo info(&parse_info);
if (FLAG_harmony_modules && is_module) {
- info.MarkAsModule();
+ parse_info.set_module();
} else {
- info.MarkAsGlobal();
+ parse_info.set_global();
}
- info.SetCachedData(cached_data, compile_options);
- info.SetExtension(extension);
- info.SetContext(context);
+ if (compile_options != ScriptCompiler::kNoCompileOptions) {
+ parse_info.set_cached_data(cached_data);
+ }
+ parse_info.set_compile_options(compile_options);
+ parse_info.set_extension(extension);
+ parse_info.set_context(context);
if (FLAG_serialize_toplevel &&
compile_options == ScriptCompiler::kProduceCodeCache) {
info.PrepareForSerializing();
}
- info.SetLanguageMode(
+ parse_info.set_language_mode(
static_cast<LanguageMode>(info.language_mode() | language_mode));
result = CompileToplevel(&info);
if (extension == NULL && !result.is_null() && !result->dont_cache()) {
@@ -1373,19 +1341,21 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
- CompilationInfo* info, int source_length) {
- Isolate* isolate = info->isolate();
+ Handle<Script> script, ParseInfo* parse_info, int source_length) {
+ Isolate* isolate = script->GetIsolate();
+ // TODO(titzer): increment the counters in caller.
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
LanguageMode language_mode =
construct_language_mode(FLAG_use_strict, FLAG_use_strong);
- info->SetLanguageMode(
- static_cast<LanguageMode>(info->language_mode() | language_mode));
+ parse_info->set_language_mode(
+ static_cast<LanguageMode>(parse_info->language_mode() | language_mode));
+ CompilationInfo compile_info(parse_info);
// TODO(marja): FLAG_serialize_toplevel is not honoured and won't be; when the
// real code caching lands, streaming needs to be adapted to use it.
- return CompileToplevel(info);
+ return CompileToplevel(&compile_info);
}
@@ -1393,10 +1363,12 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
FunctionLiteral* literal, Handle<Script> script,
CompilationInfo* outer_info) {
// Precondition: code has been parsed and scopes have been analyzed.
- CompilationInfoWithZone info(script);
- info.SetFunction(literal);
- info.PrepareForCompilation(literal->scope());
- info.SetLanguageMode(literal->scope()->language_mode());
+ Zone zone;
+ ParseInfo parse_info(&zone, script);
+ CompilationInfo info(&parse_info);
+ parse_info.set_literal(literal);
+ parse_info.set_scope(literal->scope());
+ parse_info.set_language_mode(literal->scope()->language_mode());
if (outer_info->will_serialize()) info.PrepareForSerializing();
Isolate* isolate = info.isolate();
@@ -1412,10 +1384,11 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
// of functions without an outer context when setting a breakpoint through
// Debug::FindSharedFunctionInfoInScript.
bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
- bool allow_lazy = literal->AllowsLazyCompilation() &&
- !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
+ bool allow_lazy =
+ literal->AllowsLazyCompilation() &&
+ !DebuggerWantsEagerCompilation(isolate, allow_lazy_without_ctx);
- if (outer_info->is_toplevel() && outer_info->will_serialize()) {
+ if (outer_info->parse_info()->is_toplevel() && outer_info->will_serialize()) {
     // Make sure that if the toplevel code may be serialized, the inner
     // functions are allowed to be compiled lazily.
// This is necessary to serialize toplevel code without inner functions.
@@ -1436,7 +1409,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
// called.
info.EnsureFeedbackVector();
scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
- } else if (Renumber(&info) && FullCodeGenerator::MakeCode(&info)) {
+ } else if (Renumber(info.parse_info()) &&
+ FullCodeGenerator::MakeCode(&info)) {
// MakeCode will ensure that the feedback vector is present and
// appropriately sized.
DCHECK(!info.code().is_null());
@@ -1481,7 +1455,6 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
Isolate* isolate = info->isolate();
DCHECK(AllowCompilation::IsAllowed(isolate));
VMState<COMPILER> state(isolate);
- DCHECK(isolate->use_crankshaft());
DCHECK(!isolate->has_pending_exception());
PostponeInterruptsScope postpone(isolate);
@@ -1565,10 +1538,10 @@ Handle<Code> Compiler::GetConcurrentlyOptimizedCode(OptimizedCompileJob* job) {
}
-bool Compiler::DebuggerWantsEagerCompilation(CompilationInfo* info,
+bool Compiler::DebuggerWantsEagerCompilation(Isolate* isolate,
bool allow_lazy_without_ctx) {
- if (LiveEditFunctionTracker::IsActive(info->isolate())) return true;
- Debug* debug = info->isolate()->debug();
+ if (LiveEditFunctionTracker::IsActive(isolate)) return true;
+ Debug* debug = isolate->debug();
bool debugging = debug->is_active() || debug->has_break_points();
return debugging && !allow_lazy_without_ctx;
}
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 8ef2e0a95b..fb91a9e2f5 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -15,13 +15,8 @@ namespace internal {
class AstValueFactory;
class HydrogenCodeStub;
-
-// ParseRestriction is used to restrict the set of valid statements in a
-// unit of compilation. Restriction violations cause a syntax error.
-enum ParseRestriction {
- NO_PARSE_RESTRICTION, // All expressions are allowed.
- ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
-};
+class ParseInfo;
+class ScriptData;
struct OffsetRange {
OffsetRange(int from, int to) : from(from), to(to) {}
@@ -39,9 +34,9 @@ struct OffsetRange {
// script start.
class SourcePosition {
public:
- SourcePosition(const SourcePosition& other) : value_(other.value_) {}
-
- static SourcePosition Unknown() { return SourcePosition(kNoPosition); }
+ static SourcePosition Unknown() {
+ return SourcePosition::FromRaw(kNoPosition);
+ }
bool IsUnknown() const { return value_ == kNoPosition; }
@@ -72,10 +67,14 @@ class SourcePosition {
// Offset from the start of the inlined function.
typedef BitField<uint32_t, 9, 23> PositionField;
- explicit SourcePosition(uint32_t value) : value_(value) {}
-
friend class HPositionInfo;
- friend class LCodeGenBase;
+ friend class Deoptimizer;
+
+ static SourcePosition FromRaw(uint32_t raw_position) {
+ SourcePosition position;
+ position.value_ = raw_position;
+ return position;
+ }
// If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
// and PositionField.
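
Per the typedefs in this hunk, a SourcePosition packs an inlining id and a 23-bit offset into one uint32_t, and the new private FromRaw factory replaces the old raw-value constructor. A standalone sketch of that packing; the field order (id in the low 9 bits) is implied by PositionField starting at bit 9, and everything else is illustrative:

#include <cassert>
#include <cstdint>

struct PositionSketch {
  static const uint32_t kIdBits = 9, kPosBits = 23;

  // Mirrors the private FromRaw factory above.
  static PositionSketch FromRaw(uint32_t raw) {
    PositionSketch p;
    p.value_ = raw;
    return p;
  }

  // Pack id into the low bits, position offset above it.
  static uint32_t Encode(uint32_t id, uint32_t pos) {
    assert(id < (1u << kIdBits) && pos < (1u << kPosBits));
    return id | (pos << kIdBits);
  }

  uint32_t inlining_id() const { return value_ & ((1u << kIdBits) - 1); }
  uint32_t position() const { return value_ >> kIdBits; }

  uint32_t value_;
};

int main() {
  PositionSketch p = PositionSketch::FromRaw(PositionSketch::Encode(3, 100));
  assert(p.inlining_id() == 3 && p.position() == 100);
  return 0;
}
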
@@ -87,52 +86,23 @@ class SourcePosition {
std::ostream& operator<<(std::ostream& os, const SourcePosition& p);
-class InlinedFunctionInfo {
- public:
- explicit InlinedFunctionInfo(Handle<SharedFunctionInfo> shared)
- : shared_(shared), start_position_(shared->start_position()) {}
-
- Handle<SharedFunctionInfo> shared() const { return shared_; }
- int start_position() const { return start_position_; }
-
- private:
- Handle<SharedFunctionInfo> shared_;
- int start_position_;
+struct InlinedFunctionInfo {
+ InlinedFunctionInfo(int parent_id, SourcePosition inline_position,
+ int script_id, int start_position)
+ : parent_id(parent_id),
+ inline_position(inline_position),
+ script_id(script_id),
+ start_position(start_position) {}
+ int parent_id;
+ SourcePosition inline_position;
+ int script_id;
+ int start_position;
+ std::vector<size_t> deopt_pc_offsets;
+
+ static const int kNoParentId = -1;
};
-class ScriptData {
- public:
- ScriptData(const byte* data, int length);
- ~ScriptData() {
- if (owns_data_) DeleteArray(data_);
- }
-
- const byte* data() const { return data_; }
- int length() const { return length_; }
- bool rejected() const { return rejected_; }
-
- void Reject() { rejected_ = true; }
-
- void AcquireDataOwnership() {
- DCHECK(!owns_data_);
- owns_data_ = true;
- }
-
- void ReleaseDataOwnership() {
- DCHECK(owns_data_);
- owns_data_ = false;
- }
-
- private:
- bool owns_data_ : 1;
- bool rejected_ : 1;
- const byte* data_;
- int length_;
-
- DISALLOW_COPY_AND_ASSIGN(ScriptData);
-};
-
// CompilationInfo encapsulates some information known at compile time. It
// is constructed based on the resources available at compile-time.
class CompilationInfo {
@@ -140,113 +110,67 @@ class CompilationInfo {
// Various configuration flags for a compilation, as well as some properties
// of the compiled code produced by a compilation.
enum Flag {
- kLazy = 1 << 0,
- kEval = 1 << 1,
- kGlobal = 1 << 2,
- kStrictMode = 1 << 3,
- kStrongMode = 1 << 4,
- kThisHasUses = 1 << 5,
- kNative = 1 << 6,
- kDeferredCalling = 1 << 7,
- kNonDeferredCalling = 1 << 8,
- kSavesCallerDoubles = 1 << 9,
- kRequiresFrame = 1 << 10,
- kMustNotHaveEagerFrame = 1 << 11,
- kDeoptimizationSupport = 1 << 12,
- kDebug = 1 << 13,
- kCompilingForDebugging = 1 << 14,
- kParseRestriction = 1 << 15,
- kSerializing = 1 << 16,
- kContextSpecializing = 1 << 17,
- kInliningEnabled = 1 << 18,
- kTypingEnabled = 1 << 19,
- kDisableFutureOptimization = 1 << 20,
- kModule = 1 << 21,
- kToplevel = 1 << 22,
- kSplittingEnabled = 1 << 23
+ kDeferredCalling = 1 << 0,
+ kNonDeferredCalling = 1 << 1,
+ kSavesCallerDoubles = 1 << 2,
+ kRequiresFrame = 1 << 3,
+ kMustNotHaveEagerFrame = 1 << 4,
+ kDeoptimizationSupport = 1 << 5,
+ kDebug = 1 << 6,
+ kCompilingForDebugging = 1 << 7,
+ kSerializing = 1 << 8,
+ kContextSpecializing = 1 << 9,
+ kInliningEnabled = 1 << 10,
+ kTypingEnabled = 1 << 11,
+ kDisableFutureOptimization = 1 << 12,
+ kSplittingEnabled = 1 << 13,
+ kBuiltinInliningEnabled = 1 << 14,
+ kTypeFeedbackEnabled = 1 << 15
};
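
Each enumerator above is a distinct bit in the unsigned flags_ word declared further down, queried and set through the GetFlag/SetFlag helpers that appear later in this diff. A minimal standalone sketch of that storage scheme, with illustrative flag names:

#include <cassert>

// Each flag occupies its own bit, so many can share one word.
enum FlagSketch {
  kDebugSketch = 1 << 6,
  kSerializingSketch = 1 << 8,
};

class FlagsHolderSketch {
 public:
  bool GetFlag(FlagSketch f) const { return (flags_ & f) != 0; }
  void SetFlag(FlagSketch f) { flags_ |= f; }

 private:
  unsigned flags_ = 0;
};

int main() {
  FlagsHolderSketch h;
  h.SetFlag(kDebugSketch);
  assert(h.GetFlag(kDebugSketch) && !h.GetFlag(kSerializingSketch));
  return 0;
}
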
- CompilationInfo(Handle<JSFunction> closure, Zone* zone);
- CompilationInfo(Handle<Script> script, Zone* zone);
+ explicit CompilationInfo(ParseInfo* parse_info);
CompilationInfo(CodeStub* stub, Isolate* isolate, Zone* zone);
virtual ~CompilationInfo();
+ ParseInfo* parse_info() const { return parse_info_; }
+
+ // -----------------------------------------------------------
+ // TODO(titzer): inline and delete accessors of ParseInfo
+ // -----------------------------------------------------------
+ Handle<Script> script() const;
+ bool is_eval() const;
+ bool is_native() const;
+ bool is_module() const;
+ LanguageMode language_mode() const;
+ Handle<JSFunction> closure() const;
+ FunctionLiteral* function() const;
+ Scope* scope() const;
+ Handle<Context> context() const;
+ Handle<SharedFunctionInfo> shared_info() const;
+ bool has_shared_info() const;
+ // -----------------------------------------------------------
+
Isolate* isolate() const {
return isolate_;
}
Zone* zone() { return zone_; }
bool is_osr() const { return !osr_ast_id_.IsNone(); }
- bool is_lazy() const { return GetFlag(kLazy); }
- bool is_eval() const { return GetFlag(kEval); }
- bool is_global() const { return GetFlag(kGlobal); }
- bool is_module() const { return GetFlag(kModule); }
- LanguageMode language_mode() const {
- STATIC_ASSERT(LANGUAGE_END == 3);
- return construct_language_mode(GetFlag(kStrictMode), GetFlag(kStrongMode));
- }
- FunctionLiteral* function() const { return function_; }
- Scope* scope() const { return scope_; }
- Scope* script_scope() const { return script_scope_; }
Handle<Code> code() const { return code_; }
- Handle<JSFunction> closure() const { return closure_; }
- Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
- Handle<Script> script() const { return script_; }
- void set_script(Handle<Script> script) { script_ = script; }
CodeStub* code_stub() const { return code_stub_; }
- v8::Extension* extension() const { return extension_; }
- ScriptData** cached_data() const { return cached_data_; }
- ScriptCompiler::CompileOptions compile_options() const {
- return compile_options_;
- }
- ScriptCompiler::ExternalSourceStream* source_stream() const {
- return source_stream_;
- }
- ScriptCompiler::StreamedSource::Encoding source_stream_encoding() const {
- return source_stream_encoding_;
- }
- Handle<Context> context() const { return context_; }
BailoutId osr_ast_id() const { return osr_ast_id_; }
Handle<Code> unoptimized_code() const { return unoptimized_code_; }
int opt_count() const { return opt_count_; }
int num_parameters() const;
int num_heap_slots() const;
Code::Flags flags() const;
-
- void MarkAsEval() {
- DCHECK(!is_lazy());
- SetFlag(kEval);
- }
-
- void MarkAsGlobal() {
- DCHECK(!is_lazy());
- SetFlag(kGlobal);
- }
-
- void MarkAsModule() {
- DCHECK(!is_lazy());
- SetFlag(kModule);
- }
+ bool has_scope() const { return scope() != nullptr; }
void set_parameter_count(int parameter_count) {
DCHECK(IsStub());
parameter_count_ = parameter_count;
}
- void set_this_has_uses(bool has_no_uses) {
- SetFlag(kThisHasUses, has_no_uses);
- }
-
- bool this_has_uses() { return GetFlag(kThisHasUses); }
-
- void SetLanguageMode(LanguageMode language_mode) {
- STATIC_ASSERT(LANGUAGE_END == 3);
- SetFlag(kStrictMode, language_mode & STRICT_BIT);
- SetFlag(kStrongMode, language_mode & STRONG_BIT);
- }
-
- void MarkAsNative() { SetFlag(kNative); }
-
- bool is_native() const { return GetFlag(kNative); }
+ bool is_tracking_positions() const { return track_positions_; }
bool is_calling() const {
return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
@@ -286,17 +210,25 @@ class CompilationInfo {
bool is_context_specializing() const { return GetFlag(kContextSpecializing); }
+ void MarkAsTypeFeedbackEnabled() { SetFlag(kTypeFeedbackEnabled); }
+
+ bool is_type_feedback_enabled() const {
+ return GetFlag(kTypeFeedbackEnabled);
+ }
+
void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
- void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
+ void MarkAsBuiltinInliningEnabled() { SetFlag(kBuiltinInliningEnabled); }
- bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
+ bool is_builtin_inlining_enabled() const {
+ return GetFlag(kBuiltinInliningEnabled);
+ }
- void MarkAsToplevel() { SetFlag(kToplevel); }
+ void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
- bool is_toplevel() const { return GetFlag(kToplevel); }
+ bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
void MarkAsSplittingEnabled() { SetFlag(kSplittingEnabled); }
@@ -307,46 +239,11 @@ class CompilationInfo {
!is_debug();
}
- void SetParseRestriction(ParseRestriction restriction) {
- SetFlag(kParseRestriction, restriction != NO_PARSE_RESTRICTION);
- }
-
- ParseRestriction parse_restriction() const {
- return GetFlag(kParseRestriction) ? ONLY_SINGLE_FUNCTION_LITERAL
- : NO_PARSE_RESTRICTION;
- }
-
- void SetFunction(FunctionLiteral* literal) {
- DCHECK(function_ == NULL);
- function_ = literal;
- }
- void PrepareForCompilation(Scope* scope);
- void SetScriptScope(Scope* script_scope) {
- DCHECK(script_scope_ == NULL);
- script_scope_ = script_scope;
- }
void EnsureFeedbackVector();
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
}
void SetCode(Handle<Code> code) { code_ = code; }
- void SetExtension(v8::Extension* extension) {
- DCHECK(!is_lazy());
- extension_ = extension;
- }
- void SetCachedData(ScriptData** cached_data,
- ScriptCompiler::CompileOptions compile_options) {
- compile_options_ = compile_options;
- if (compile_options == ScriptCompiler::kNoCompileOptions) {
- cached_data_ = NULL;
- } else {
- DCHECK(!is_lazy());
- cached_data_ = cached_data;
- }
- }
- void SetContext(Handle<Context> context) {
- context_ = context;
- }
void MarkCompilingForDebugging() { SetFlag(kCompilingForDebugging); }
bool IsCompilingForDebugging() { return GetFlag(kCompilingForDebugging); }
@@ -373,13 +270,18 @@ class CompilationInfo {
bool IsOptimizable() const { return mode_ == BASE; }
bool IsStub() const { return mode_ == STUB; }
void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
- DCHECK(!shared_info_.is_null());
+ DCHECK(!shared_info().is_null());
SetMode(OPTIMIZE);
osr_ast_id_ = osr_ast_id;
unoptimized_code_ = unoptimized;
optimization_id_ = isolate()->NextOptimizationId();
}
+ void SetStub(CodeStub* code_stub) {
+ SetMode(STUB);
+ code_stub_ = code_stub;
+ }
+
// Deoptimization support.
bool HasDeoptimizationSupport() const {
return GetFlag(kDeoptimizationSupport);
@@ -409,12 +311,8 @@ class CompilationInfo {
void RollbackDependencies();
- void SaveHandles() {
- SaveHandle(&closure_);
- SaveHandle(&shared_info_);
- SaveHandle(&context_);
- SaveHandle(&script_);
- SaveHandle(&unoptimized_code_);
+ void ReopenHandlesInNewHandleScope() {
+ unoptimized_code_ = Handle<Code>(*unoptimized_code_);
}
void AbortOptimization(BailoutReason reason) {
@@ -454,14 +352,16 @@ class CompilationInfo {
return result;
}
- List<InlinedFunctionInfo>* inlined_function_infos() {
- return inlined_function_infos_;
+ int start_position_for(uint32_t inlining_id) {
+ return inlined_function_infos_.at(inlining_id).start_position;
}
- List<int>* inlining_id_to_function_id() {
- return inlining_id_to_function_id_;
+ const std::vector<InlinedFunctionInfo>& inlined_function_infos() {
+ return inlined_function_infos_;
}
+
+ void LogDeoptCallPosition(int pc_offset, int inlining_id);
   int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
-                           SourcePosition position);
+                           SourcePosition position, int parent_id);
Handle<Foreign> object_wrapper() {
if (object_wrapper_.is_null()) {
@@ -482,18 +382,11 @@ class CompilationInfo {
}
bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
- return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure_);
+ return osr_ast_id_ == osr_ast_id && function.is_identical_to(closure());
}
int optimization_id() const { return optimization_id_; }
- AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
- void SetAstValueFactory(AstValueFactory* ast_value_factory,
- bool owned = true) {
- ast_value_factory_ = ast_value_factory;
- ast_value_factory_owned_ = owned;
- }
-
int osr_expr_stack_height() { return osr_expr_stack_height_; }
void set_osr_expr_stack_height(int height) {
DCHECK(height >= 0);
@@ -507,16 +400,15 @@ class CompilationInfo {
bool is_simple_parameter_list();
protected:
- CompilationInfo(Handle<SharedFunctionInfo> shared_info,
- Zone* zone);
- CompilationInfo(ScriptCompiler::ExternalSourceStream* source_stream,
- ScriptCompiler::StreamedSource::Encoding encoding,
- Isolate* isolate, Zone* zone);
+ ParseInfo* parse_info_;
+ void DisableFutureOptimization() {
+ if (GetFlag(kDisableFutureOptimization) && has_shared_info()) {
+ shared_info()->DisableOptimization(bailout_reason());
+ }
+ }
private:
- Isolate* isolate_;
-
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
@@ -529,7 +421,10 @@ class CompilationInfo {
STUB
};
- void Initialize(Isolate* isolate, Mode mode, Zone* zone);
+ CompilationInfo(ParseInfo* parse_info, CodeStub* code_stub, Mode mode,
+ Isolate* isolate, Zone* zone);
+
+ Isolate* isolate_;
void SetMode(Mode mode) {
mode_ = mode;
@@ -545,35 +440,11 @@ class CompilationInfo {
unsigned flags_;
- // Fields filled in by the compilation pipeline.
- // AST filled in by the parser.
- FunctionLiteral* function_;
- // The scope of the function literal as a convenience. Set to indicate
- // that scopes have been analyzed.
- Scope* scope_;
- // The script scope provided as a convenience.
- Scope* script_scope_;
// For compiled stubs, the stub object
CodeStub* code_stub_;
// The compiled code.
Handle<Code> code_;
- // Possible initial inputs to the compilation process.
- Handle<JSFunction> closure_;
- Handle<SharedFunctionInfo> shared_info_;
- Handle<Script> script_;
- ScriptCompiler::ExternalSourceStream* source_stream_; // Not owned.
- ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
-
- // Fields possibly needed for eager compilation, NULL by default.
- v8::Extension* extension_;
- ScriptData** cached_data_;
- ScriptCompiler::CompileOptions compile_options_;
-
- // The context of the caller for eval code, and the script context for a
- // global script. Will be a null handle otherwise.
- Handle<Context> context_;
-
// Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
Handle<TypeFeedbackVector> feedback_vector_;
@@ -593,21 +464,13 @@ class CompilationInfo {
ZoneList<Handle<HeapObject> >* dependencies_[DependentCode::kGroupCount];
- template<typename T>
- void SaveHandle(Handle<T> *object) {
- if (!object->is_null()) {
- Handle<T> handle(*(*object));
- *object = handle;
- }
- }
-
BailoutReason bailout_reason_;
int prologue_offset_;
List<OffsetRange>* no_frame_ranges_;
- List<InlinedFunctionInfo>* inlined_function_infos_;
- List<int>* inlining_id_to_function_id_;
+ std::vector<InlinedFunctionInfo> inlined_function_infos_;
+ bool track_positions_;
// A copy of shared_info()->opt_count() to avoid handle deref
// during graph optimization.
@@ -620,9 +483,6 @@ class CompilationInfo {
int optimization_id_;
- AstValueFactory* ast_value_factory_;
- bool ast_value_factory_owned_;
-
// This flag is used by the main thread to track whether this compilation
// should be abandoned due to dependency change.
bool aborted_due_to_dependency_change_;
@@ -633,35 +493,6 @@ class CompilationInfo {
};
-// Exactly like a CompilationInfo, except also creates and enters a
-// Zone on construction and deallocates it on exit.
-class CompilationInfoWithZone: public CompilationInfo {
- public:
- explicit CompilationInfoWithZone(Handle<Script> script)
- : CompilationInfo(script, &zone_) {}
- explicit CompilationInfoWithZone(Handle<SharedFunctionInfo> shared_info)
- : CompilationInfo(shared_info, &zone_) {}
- explicit CompilationInfoWithZone(Handle<JSFunction> closure)
- : CompilationInfo(closure, &zone_) {}
- CompilationInfoWithZone(CodeStub* stub, Isolate* isolate)
- : CompilationInfo(stub, isolate, &zone_) {}
- CompilationInfoWithZone(ScriptCompiler::ExternalSourceStream* stream,
- ScriptCompiler::StreamedSource::Encoding encoding,
- Isolate* isolate)
- : CompilationInfo(stream, encoding, isolate, &zone_) {}
-
- // Virtual destructor because a CompilationInfoWithZone has to exit the
- // zone scope and get rid of dependent maps even when the destructor is
- // called when cast as a CompilationInfo.
- virtual ~CompilationInfoWithZone() {
- RollbackDependencies();
- }
-
- private:
- Zone zone_;
-};
-
-
// A wrapper around a CompilationInfo that detaches the Handles from
// the underlying DeferredHandleScope and stores them in info_ on
// destruction.
@@ -786,9 +617,9 @@ class Compiler : public AllStatic {
Handle<JSFunction> function);
// Parser::Parse, then Compiler::Analyze.
- static bool ParseAndAnalyze(CompilationInfo* info);
+ static bool ParseAndAnalyze(ParseInfo* info);
// Rewrite, analyze scopes, and renumber.
- static bool Analyze(CompilationInfo* info);
+ static bool Analyze(ParseInfo* info);
// Adds deoptimization support, requires ParseAndAnalyze.
static bool EnsureDeoptimizationSupport(CompilationInfo* info);
@@ -807,11 +638,13 @@ class Compiler : public AllStatic {
static Handle<SharedFunctionInfo> CompileScript(
Handle<String> source, Handle<Object> script_name, int line_offset,
int column_offset, bool is_debugger_script, bool is_shared_cross_origin,
- Handle<Context> context, v8::Extension* extension,
- ScriptData** cached_data, ScriptCompiler::CompileOptions compile_options,
+ Handle<Object> source_map_url, Handle<Context> context,
+ v8::Extension* extension, ScriptData** cached_data,
+ ScriptCompiler::CompileOptions compile_options,
NativesFlag is_natives_code, bool is_module);
- static Handle<SharedFunctionInfo> CompileStreamedScript(CompilationInfo* info,
+ static Handle<SharedFunctionInfo> CompileStreamedScript(Handle<Script> script,
+ ParseInfo* info,
int source_length);
// Create a shared function info object (the code may be lazily compiled).
@@ -834,8 +667,9 @@ class Compiler : public AllStatic {
// On failure, return the empty handle.
static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
+ // TODO(titzer): move this method out of the compiler.
static bool DebuggerWantsEagerCompilation(
- CompilationInfo* info, bool allow_lazy_without_ctx = false);
+ Isolate* isolate, bool allow_lazy_without_ctx = false);
};
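
The SaveHandles/SaveHandle machinery above is reduced to a single ReopenHandlesInNewHandleScope, since the parse-phase handles now live on the ParseInfo. A minimal sketch of the reopen idiom, assuming V8's Handle<T> behaves as in the removed SaveHandle (the helper name is illustrative, not part of the patch):

    // Dereferencing the stale handle yields the raw object; the Handle<T>
    // constructor then allocates a fresh slot in whichever HandleScope is
    // currently open, so the object stays usable after the old scope closes.
    template <typename T>
    Handle<T> ReopenInCurrentScope(Handle<T> stale) {
      return Handle<T>(*stale);  // same object, new handle location
    }
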
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 8c8e53092b..c69f22cb87 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -61,7 +61,8 @@ FieldAccess AccessBuilder::ForMapInstanceType() {
// static
FieldAccess AccessBuilder::ForStringLength() {
return {kTaggedBase, String::kLengthOffset, Handle<Name>(),
- Type::SignedSmall(), kMachAnyTagged};
+ Type::Intersect(Type::UnsignedSmall(), Type::TaggedSigned()),
+ kMachAnyTagged};
}
@@ -82,6 +83,12 @@ FieldAccess AccessBuilder::ForContextSlot(size_t index) {
// static
+FieldAccess AccessBuilder::ForStatsCounter() {
+ return {kUntaggedBase, 0, MaybeHandle<Name>(), Type::Signed32(), kMachInt32};
+}
+
+
+// static
ElementAccess AccessBuilder::ForFixedArrayElement() {
return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
}
@@ -115,6 +122,21 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
return {kUntaggedBase, 0, Type::None(), kMachNone};
}
+
+// static
+ElementAccess AccessBuilder::ForSeqStringChar(String::Encoding encoding) {
+ switch (encoding) {
+ case String::ONE_BYTE_ENCODING:
+ return {kTaggedBase, SeqString::kHeaderSize, Type::Unsigned32(),
+ kMachUint8};
+ case String::TWO_BYTE_ENCODING:
+ return {kTaggedBase, SeqString::kHeaderSize, Type::Unsigned32(),
+ kMachUint16};
+ }
+ UNREACHABLE();
+ return {kUntaggedBase, 0, Type::None(), kMachNone};
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index d6385e444d..ddbad8c64e 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -12,7 +12,7 @@ namespace internal {
namespace compiler {
// This access builder provides a set of static methods constructing commonly
-// used FieldAccess and ElementAccess descriptors. These descriptors server as
+// used FieldAccess and ElementAccess descriptors. These descriptors serve as
// parameters to simplified load/store operators.
class AccessBuilder FINAL : public AllStatic {
public:
@@ -46,6 +46,9 @@ class AccessBuilder FINAL : public AllStatic {
// Provides access to Context slots.
static FieldAccess ForContextSlot(size_t index);
+ // Provides access to the backing store of a StatsCounter.
+ static FieldAccess ForStatsCounter();
+
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
@@ -53,6 +56,9 @@ class AccessBuilder FINAL : public AllStatic {
static ElementAccess ForTypedArrayElement(ExternalArrayType type,
bool is_external);
+ // Provides access to the characters of sequential strings.
+ static ElementAccess ForSeqStringChar(String::Encoding encoding);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
};
diff --git a/deps/v8/src/compiler/all-nodes.cc b/deps/v8/src/compiler/all-nodes.cc
index b055a68c08..ed4a218c2b 100644
--- a/deps/v8/src/compiler/all-nodes.cc
+++ b/deps/v8/src/compiler/all-nodes.cc
@@ -4,16 +4,16 @@
#include "src/compiler/all-nodes.h"
+#include "src/compiler/graph.h"
+
namespace v8 {
namespace internal {
namespace compiler {
AllNodes::AllNodes(Zone* local_zone, const Graph* graph)
- : live(local_zone),
- gray(local_zone),
- state(graph->NodeCount(), AllNodes::kDead, local_zone) {
+ : live(local_zone), is_live(graph->NodeCount(), false, local_zone) {
Node* end = graph->end();
- state[end->id()] = AllNodes::kLive;
+ is_live[end->id()] = true;
live.push_back(end);
// Find all live nodes reachable from end.
for (size_t i = 0; i < live.size(); i++) {
@@ -26,23 +26,14 @@ AllNodes::AllNodes(Zone* local_zone, const Graph* graph)
// TODO(titzer): print a warning.
continue;
}
- if (state[input->id()] != AllNodes::kLive) {
+ if (!is_live[input->id()]) {
+ is_live[input->id()] = true;
live.push_back(input);
- state[input->id()] = AllNodes::kLive;
}
}
}
-
- // Find all nodes that are not reachable from end that use live nodes.
- for (size_t i = 0; i < live.size(); i++) {
- for (Node* const use : live[i]->uses()) {
- if (state[use->id()] == AllNodes::kDead) {
- gray.push_back(use);
- state[use->id()] = AllNodes::kGray;
- }
- }
- }
-}
}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/all-nodes.h b/deps/v8/src/compiler/all-nodes.h
index e6a83ef623..700f0071b1 100644
--- a/deps/v8/src/compiler/all-nodes.h
+++ b/deps/v8/src/compiler/all-nodes.h
@@ -5,7 +5,6 @@
#ifndef V8_COMPILER_ALL_NODES_H_
#define V8_COMPILER_ALL_NODES_H_
-#include "src/compiler/graph.h"
#include "src/compiler/node.h"
#include "src/zone-containers.h"
@@ -17,25 +16,23 @@ namespace compiler {
// from end.
class AllNodes {
public:
- // Constructor. Traverses the graph and builds the {live} and {gray} sets.
+ // Constructor. Traverses the graph and builds the {live} sets.
AllNodes(Zone* local_zone, const Graph* graph);
bool IsLive(Node* node) {
- return node != nullptr && node->id() < static_cast<int>(state.size()) &&
- state[node->id()] == kLive;
+ if (!node) return false;
+ size_t id = node->id();
+ return id < is_live.size() && is_live[id];
}
NodeVector live; // Nodes reachable from end.
- NodeVector gray; // Nodes themselves not reachable from end, but that
- // appear in use lists of live nodes.
private:
- enum State { kDead, kGray, kLive };
-
- ZoneVector<State> state;
+ BoolVector is_live;
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_ALL_NODES_H_
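
The AllNodes rewrite drops the three-state kDead/kGray/kLive marking for a plain boolean vector and a single worklist pass. A standalone sketch of the same traversal over an adjacency list (names and types are illustrative, not V8's):

    #include <cstddef>
    #include <vector>

    // Seed the worklist with the end node, then scan it left to right,
    // appending each input the first time it is seen; "live" doubles as
    // the worklist and the result, growing while the loop scans it.
    std::vector<int> FindLive(int end_id,
                              const std::vector<std::vector<int>>& inputs) {
      std::vector<bool> is_live(inputs.size(), false);
      std::vector<int> live;
      is_live[end_id] = true;
      live.push_back(end_id);
      for (size_t i = 0; i < live.size(); i++) {
        for (int input : inputs[live[i]]) {
          if (!is_live[input]) {
            is_live[input] = true;
            live.push_back(input);
          }
        }
      }
      return live;
    }
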
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index 1ff7ea3acc..79767a84b3 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -26,11 +26,11 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- SwVfpRegister OutputFloat32Register(int index = 0) {
+ SwVfpRegister OutputFloat32Register(size_t index = 0) {
return ToFloat32Register(instr_->OutputAt(index));
}
- SwVfpRegister InputFloat32Register(int index) {
+ SwVfpRegister InputFloat32Register(size_t index) {
return ToFloat32Register(instr_->InputAt(index));
}
@@ -38,11 +38,11 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
return ToFloat64Register(op).low();
}
- LowDwVfpRegister OutputFloat64Register(int index = 0) {
+ LowDwVfpRegister OutputFloat64Register(size_t index = 0) {
return ToFloat64Register(instr_->OutputAt(index));
}
- LowDwVfpRegister InputFloat64Register(int index) {
+ LowDwVfpRegister InputFloat64Register(size_t index) {
return ToFloat64Register(instr_->InputAt(index));
}
@@ -62,7 +62,7 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
return LeaveCC;
}
- Operand InputImmediate(int index) {
+ Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@@ -83,8 +83,8 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
return Operand::Zero();
}
- Operand InputOperand2(int first_index) {
- const int index = first_index;
+ Operand InputOperand2(size_t first_index) {
+ const size_t index = first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
case kMode_Offset_RI:
@@ -115,8 +115,8 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
return Operand::Zero();
}
- MemOperand InputOffset(int* first_index) {
- const int index = *first_index;
+ MemOperand InputOffset(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
case kMode_Operand2_I:
@@ -141,7 +141,7 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(r0);
}
- MemOperand InputOffset(int first_index = 0) {
+ MemOperand InputOffset(size_t first_index = 0) {
return InputOffset(&first_index);
}
@@ -313,7 +313,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Call(ip);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -328,7 +328,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(ip);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -348,6 +348,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// don't emit code for nops.
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ break;
+ }
case kArchRet:
AssembleReturn();
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -483,6 +489,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputInt32(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
+ case kArmClz:
+ __ clz(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmCmp:
__ cmp(i.InputRegister(0), i.InputOperand2(1));
DCHECK_EQ(SetCC, i.OutputSBit());
@@ -558,16 +568,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArmVsqrtF64:
__ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVfloorF64:
+ case kArmVrintmF64:
__ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVceilF64:
+ case kArmVrintpF64:
__ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVroundTruncateF64:
+ case kArmVrintzF64:
__ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
- case kArmVroundTiesAwayF64:
+ case kArmVrintaF64:
__ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
break;
case kArmVnegF64:
@@ -611,6 +621,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
+ case kArmVmovLowU32F64:
+ __ VmovLow(i.OutputRegister(), i.InputFloat64Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovLowF64U32:
+ __ VmovLow(i.OutputFloat64Register(), i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovHighU32F64:
+ __ VmovHigh(i.OutputRegister(), i.InputFloat64Register(0));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovHighF64U32:
+ __ VmovHigh(i.OutputFloat64Register(), i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
+ case kArmVmovF64U32U32:
+ __ vmov(i.OutputFloat64Register(), i.InputRegister(0),
+ i.InputRegister(1));
+ DCHECK_EQ(LeaveCC, i.OutputSBit());
+ break;
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -620,7 +651,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmStrb: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ strb(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -633,7 +664,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ldrsh(i.OutputRegister(), i.InputOffset());
break;
case kArmStrh: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ strh(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -643,7 +674,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ldr(i.OutputRegister(), i.InputOffset());
break;
case kArmStr: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ str(i.InputRegister(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -655,7 +686,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kArmVstrF32: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ vstr(i.InputFloat32Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -666,7 +697,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVstrF64: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.InputOffset(&index);
__ vstr(i.InputFloat64Register(index), operand);
DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -740,7 +771,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
@@ -775,6 +806,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
ArmOperandConverter i(this, instr);
Register input = i.InputRegister(0);
size_t const case_count = instr->InputCount() - 2;
+ __ CheckConstPool(true, true);
__ cmp(input, Operand(case_count));
__ BlockConstPoolFor(case_count + 2);
__ ldr(pc, MemOperand(pc, input, LSL, 2), lo);
@@ -785,9 +817,10 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
@@ -839,6 +872,8 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ ldr(r1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
stack_slots -= frame()->GetOsrStackSlotCount();
}
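
The kArmVfloorF64/kArmVceilF64/kArmVroundTruncateF64/kArmVroundTiesAwayF64 opcodes are renamed after the vrintm/vrintp/vrintz/vrinta instructions they emit. As a rough guide to the rounding modes (edge cases such as NaN and signed zero follow IEEE semantics and are not modeled here):

    #include <cmath>

    // vrintm = toward minus infinity, vrintp = toward plus infinity,
    // vrintz = toward zero, vrinta = to nearest with ties away from zero.
    double RoundLike(char mode, double x) {
      switch (mode) {
        case 'm': return std::floor(x);
        case 'p': return std::ceil(x);
        case 'z': return std::trunc(x);
        default:  return std::round(x);  // 'a'
      }
    }
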
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index ecd0b2d70b..404b1e0aca 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -15,6 +15,7 @@ namespace compiler {
V(ArmAdd) \
V(ArmAnd) \
V(ArmBic) \
+ V(ArmClz) \
V(ArmCmp) \
V(ArmCmn) \
V(ArmTst) \
@@ -53,16 +54,21 @@ namespace compiler {
V(ArmVmodF64) \
V(ArmVnegF64) \
V(ArmVsqrtF64) \
- V(ArmVfloorF64) \
- V(ArmVceilF64) \
- V(ArmVroundTruncateF64) \
- V(ArmVroundTiesAwayF64) \
+ V(ArmVrintmF64) \
+ V(ArmVrintpF64) \
+ V(ArmVrintzF64) \
+ V(ArmVrintaF64) \
V(ArmVcvtF32F64) \
V(ArmVcvtF64F32) \
V(ArmVcvtF64S32) \
V(ArmVcvtF64U32) \
V(ArmVcvtS32F64) \
V(ArmVcvtU32F64) \
+ V(ArmVmovLowU32F64) \
+ V(ArmVmovLowF64U32) \
+ V(ArmVmovHighU32F64) \
+ V(ArmVmovHighF64U32) \
+ V(ArmVmovF64U32U32) \
V(ArmVldrF32) \
V(ArmVstrF32) \
V(ArmVldrF64) \
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index 47511a1ebf..276b810ed2 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -239,9 +239,8 @@ void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -579,9 +578,8 @@ void VisitShift(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(outputs), output_count);
DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -645,6 +643,12 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmClz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
ArmOperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -956,6 +960,18 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
ArmOperandGenerator g(this);
Float64BinopMatcher m(node);
if (m.left().IsMinusZero()) {
+ if (m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kArmVrintpF64, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
Emit(kArmVnegF64, g.DefineAsRegister(node),
g.UseRegister(m.right().node()));
return;
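
The new match recognizes -0 - RoundDown(-0 - x) and emits a single kArmVrintpF64 (round up), relying on the identity ceil(x) = -floor(-x). A quick self-check of that identity:

    #include <cassert>
    #include <cmath>

    void CheckCeilViaFloor(double x) {
      if (std::isnan(x)) return;  // NaN never compares equal
      assert(std::ceil(x) == -0.0 - std::floor(-0.0 - x));
    }
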
@@ -988,37 +1004,34 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
ArmOperandGenerator g(this);
Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVfloorF64, node);
-}
-
-
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVceilF64, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kArmVrintmF64, node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVroundTruncateF64, node);
+ VisitRRFloat64(this, kArmVrintzF64, node);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
- DCHECK(CpuFeatures::IsSupported(ARMv8));
- VisitRRFloat64(this, kArmVroundTiesAwayF64, node);
+ VisitRRFloat64(this, kArmVrintaF64, node);
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
ArmOperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
@@ -1043,6 +1056,13 @@ void InstructionSelector::VisitCall(Node* node) {
Emit(kArmPush, g.NoOutput(), g.UseRegister(*i));
}
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler != nullptr) {
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -1057,7 +1077,7 @@ void InstructionSelector::VisitCall(Node* node) {
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(flags);
// Emit the call instruction.
InstructionOperand* first_output =
@@ -1081,8 +1101,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
if (cont->IsBranch()) {
selector->Emit(cont->Encode(kArmVcmpF64), g.NoOutput(),
g.UseRegister(m.left().node()), rhs,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(cont->Encode(kArmVcmpF64),
@@ -1129,9 +1148,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -1236,8 +1254,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
value_operand);
@@ -1254,64 +1271,31 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
-void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
- BasicBlock** case_branches,
- int32_t* case_values, size_t case_count,
- int32_t min_value, int32_t max_value) {
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
ArmOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- InstructionOperand default_operand = g.Label(default_branch);
- // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
- // is 2^31-1, so don't assume that it's non-zero below.
- size_t value_range =
- 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
-
- // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
- // instruction.
- size_t table_space_cost = 4 + value_range;
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * case_count;
- size_t lookup_time_cost = case_count;
- if (case_count > 0 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = value_operand;
- if (min_value) {
+ if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kArmSub | AddressingModeField::encode(kMode_Operand2_I),
- index_operand, value_operand, g.TempImmediate(min_value));
- }
- size_t input_count = 2 + value_range;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = index_operand;
- std::fill(&inputs[1], &inputs[input_count], default_operand);
- for (size_t index = 0; index < case_count; ++index) {
- size_t value = case_values[index] - min_value;
- BasicBlock* branch = case_branches[index];
- DCHECK_LE(0u, value);
- DCHECK_LT(value + 2, input_count);
- inputs[value + 2] = g.Label(branch);
+ index_operand, value_operand, g.TempImmediate(sw.min_value));
}
- Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
- return;
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
- size_t input_count = 2 + case_count * 2;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = value_operand;
- inputs[1] = default_operand;
- for (size_t index = 0; index < case_count; ++index) {
- int32_t value = case_values[index];
- BasicBlock* branch = case_branches[index];
- inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
- inputs[index * 2 + 2 + 1] = g.Label(branch);
- }
- Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
+ return EmitLookupSwitch(sw, value_operand);
}
@@ -1387,6 +1371,52 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVmovLowU32F64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ ArmOperandGenerator g(this);
+ Emit(kArmVmovHighU32F64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(right),
+ g.UseRegister(left));
+ return;
+ }
+ Emit(kArmVmovLowF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ ArmOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kArmVmovF64U32U32, g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
+ return;
+ }
+ Emit(kArmVmovHighF64U32, g.DefineSameAsFirst(node), g.UseRegister(left),
+ g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
@@ -1395,8 +1425,7 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kUint32DivIsSafe;
if (CpuFeatures::IsSupported(ARMv8)) {
- flags |= MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ flags |= MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway;
}
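
VisitSwitch now hands emission to the shared EmitTableSwitch/EmitLookupSwitch helpers but keeps the size/speed heuristic inline. The decision, restated as standalone code (a sketch of the same arithmetic, not shared V8 code):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    // Weigh code-size cost against three times the dispatch cost; prefer a
    // jump table only when it wins and the min_value bias can be subtracted
    // without underflow.
    bool UseJumpTable(size_t case_count, uint32_t value_range,
                      int32_t min_value) {
      size_t table_space_cost = 4 + value_range;
      size_t table_time_cost = 3;
      size_t lookup_space_cost = 3 + 2 * case_count;
      size_t lookup_time_cost = case_count;
      return case_count > 0 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost &&
             min_value > std::numeric_limits<int32_t>::min();
    }
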
diff --git a/deps/v8/src/compiler/arm/linkage-arm.cc b/deps/v8/src/compiler/arm/linkage-arm.cc
index 57590d3e5b..2b1faa2aca 100644
--- a/deps/v8/src/compiler/arm/linkage-arm.cc
+++ b/deps/v8/src/compiler/arm/linkage-arm.cc
@@ -51,9 +51,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties);
+ stack_parameter_count, flags, properties,
+ return_type);
}
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index 89c2ffb6f8..1008ddcecb 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -23,11 +23,11 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- DoubleRegister InputFloat32Register(int index) {
+ DoubleRegister InputFloat32Register(size_t index) {
return InputDoubleRegister(index).S();
}
- DoubleRegister InputFloat64Register(int index) {
+ DoubleRegister InputFloat64Register(size_t index) {
return InputDoubleRegister(index);
}
@@ -35,21 +35,23 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
- Register InputRegister32(int index) {
+ Register InputRegister32(size_t index) {
return ToRegister(instr_->InputAt(index)).W();
}
- Register InputRegister64(int index) { return InputRegister(index); }
+ Register InputRegister64(size_t index) { return InputRegister(index); }
- Operand InputImmediate(int index) {
+ Operand InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
- Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+ Operand InputOperand(size_t index) {
+ return ToOperand(instr_->InputAt(index));
+ }
- Operand InputOperand64(int index) { return InputOperand(index); }
+ Operand InputOperand64(size_t index) { return InputOperand(index); }
- Operand InputOperand32(int index) {
+ Operand InputOperand32(size_t index) {
return ToOperand32(instr_->InputAt(index));
}
@@ -57,7 +59,7 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
- Operand InputOperand2_32(int index) {
+ Operand InputOperand2_32(size_t index) {
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
return InputOperand32(index);
@@ -69,6 +71,10 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
case kMode_Operand2_R_ROR_I:
return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
+ case kMode_Operand2_R_UXTB:
+ return Operand(InputRegister32(index), UXTB);
+ case kMode_Operand2_R_UXTH:
+ return Operand(InputRegister32(index), UXTH);
case kMode_MRI:
case kMode_MRR:
break;
@@ -77,7 +83,7 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return Operand(-1);
}
- Operand InputOperand2_64(int index) {
+ Operand InputOperand2_64(size_t index) {
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
return InputOperand64(index);
@@ -89,6 +95,10 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
case kMode_Operand2_R_ROR_I:
return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
+ case kMode_Operand2_R_UXTB:
+ return Operand(InputRegister64(index), UXTB);
+ case kMode_Operand2_R_UXTH:
+ return Operand(InputRegister64(index), UXTH);
case kMode_MRI:
case kMode_MRR:
break;
@@ -97,14 +107,16 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return Operand(-1);
}
- MemOperand MemoryOperand(int* first_index) {
- const int index = *first_index;
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
case kMode_Operand2_R_LSL_I:
case kMode_Operand2_R_LSR_I:
case kMode_Operand2_R_ASR_I:
case kMode_Operand2_R_ROR_I:
+ case kMode_Operand2_R_UXTB:
+ case kMode_Operand2_R_UXTH:
break;
case kMode_MRI:
*first_index += 2;
@@ -117,7 +129,7 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(no_reg);
}
- MemOperand MemoryOperand(int first_index = 0) {
+ MemOperand MemoryOperand(size_t first_index = 0) {
return MemoryOperand(&first_index);
}
@@ -335,7 +347,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
__ Call(target);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchCallJSFunction: {
@@ -351,7 +363,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(x10);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchJmp:
@@ -366,6 +378,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchNop:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
@@ -375,17 +393,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Float64Ceil:
- __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
- break;
- case kArm64Float64Floor:
+ case kArm64Float64RoundDown:
__ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
+ case kArm64Float64RoundTiesAway:
+ __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
case kArm64Float64RoundTruncate:
__ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
- case kArm64Float64RoundTiesAway:
- __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ case kArm64Float64RoundUp:
+ __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArm64Add:
__ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2_64(1));
@@ -580,6 +598,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
i.InputInt8(2));
break;
+ case kArm64Bfi:
+ __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
+ i.InputInt6(3));
+ break;
case kArm64TestAndBranch32:
case kArm64TestAndBranch:
// Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
@@ -588,27 +610,22 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
// Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
break;
case kArm64Claim: {
- int words = MiscField::decode(instr->opcode());
- __ Claim(words);
+ __ Claim(i.InputInt32(0));
break;
}
case kArm64Poke: {
- int slot = MiscField::decode(instr->opcode());
- Operand operand(slot * kPointerSize);
+ Operand operand(i.InputInt32(1) * kPointerSize);
__ Poke(i.InputRegister(0), operand);
break;
}
- case kArm64PokePairZero: {
- // TODO(dcarney): test slot offset and register order.
- int slot = MiscField::decode(instr->opcode()) - 1;
- __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
- break;
- }
case kArm64PokePair: {
- int slot = MiscField::decode(instr->opcode()) - 1;
+ int slot = i.InputInt32(2) - 1;
__ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
break;
}
+ case kArm64Clz32:
+ __ Clz(i.OutputRegister32(), i.InputRegister32(0));
+ break;
case kArm64Cmp:
__ Cmp(i.InputRegister(0), i.InputOperand(1));
break;
@@ -685,6 +702,44 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArm64Uint32ToFloat64:
__ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
break;
+ case kArm64Float64ExtractLowWord32:
+ __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
+ break;
+ case kArm64Float64ExtractHighWord32:
+ // TODO(arm64): This should use MOV (to general) when NEON is supported.
+ __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
+ __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
+ break;
+ case kArm64Float64InsertLowWord32: {
+ // TODO(arm64): This should use MOV (from general) when NEON is supported.
+ UseScratchRegisterScope scope(masm());
+ Register tmp = scope.AcquireX();
+ __ Fmov(tmp, i.InputFloat64Register(0));
+ __ Bfi(tmp, i.InputRegister(1), 0, 32);
+ __ Fmov(i.OutputFloat64Register(), tmp);
+ break;
+ }
+ case kArm64Float64InsertHighWord32: {
+ // TODO(arm64): This should use MOV (from general) when NEON is supported.
+ UseScratchRegisterScope scope(masm());
+ Register tmp = scope.AcquireX();
+ __ Fmov(tmp.W(), i.InputFloat32Register(0));
+ __ Bfi(tmp, i.InputRegister(1), 32, 32);
+ __ Fmov(i.OutputFloat64Register(), tmp);
+ break;
+ }
+ case kArm64Float64MoveU64: {
+ __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
+ break;
+ }
+ case kArm64Float64Max:
+ __ Fmax(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kArm64Float64Min:
+ __ Fmin(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
case kArm64Ldrb:
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
break;
@@ -787,7 +842,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
ASSEMBLE_CHECKED_STORE_FLOAT(64);
break;
}
-}
+} // NOLINT(readability/fn_size)
// Assemble branches after this instruction.
@@ -839,7 +894,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}
@@ -890,9 +945,10 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
@@ -937,6 +993,8 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ ldr(x1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
stack_slots -= frame()->GetOsrStackSlotCount();
}
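
Without NEON general-register moves, kArm64Float64ExtractHighWord32 falls back to an Fmov into an X register followed by a 32-bit logical shift right. A portable equivalent of that bit manipulation:

    #include <cstdint>
    #include <cstring>

    // Reinterpret the double's bit pattern and shift the upper word down,
    // mirroring the Fmov + Lsr sequence.
    uint32_t ExtractHighWord32(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }
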
diff --git a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
index 863451f7c5..31187f0150 100644
--- a/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/arm64/instruction-codes-arm64.h
@@ -18,6 +18,7 @@ namespace compiler {
V(Arm64And32) \
V(Arm64Bic) \
V(Arm64Bic32) \
+ V(Arm64Clz32) \
V(Arm64Cmp) \
V(Arm64Cmp32) \
V(Arm64Cmn) \
@@ -70,12 +71,12 @@ namespace compiler {
V(Arm64Sxtw) \
V(Arm64Ubfx) \
V(Arm64Ubfx32) \
+ V(Arm64Bfi) \
V(Arm64TestAndBranch32) \
V(Arm64TestAndBranch) \
V(Arm64CompareAndBranch32) \
V(Arm64Claim) \
V(Arm64Poke) \
- V(Arm64PokePairZero) \
V(Arm64PokePair) \
V(Arm64Float64Cmp) \
V(Arm64Float64Add) \
@@ -84,16 +85,23 @@ namespace compiler {
V(Arm64Float64Div) \
V(Arm64Float64Mod) \
V(Arm64Float64Sqrt) \
- V(Arm64Float64Floor) \
- V(Arm64Float64Ceil) \
- V(Arm64Float64RoundTruncate) \
+ V(Arm64Float64RoundDown) \
V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float64RoundUp) \
V(Arm64Float32ToFloat64) \
V(Arm64Float64ToFloat32) \
V(Arm64Float64ToInt32) \
V(Arm64Float64ToUint32) \
V(Arm64Int32ToFloat64) \
V(Arm64Uint32ToFloat64) \
+ V(Arm64Float64ExtractLowWord32) \
+ V(Arm64Float64ExtractHighWord32) \
+ V(Arm64Float64InsertLowWord32) \
+ V(Arm64Float64InsertHighWord32) \
+ V(Arm64Float64MoveU64) \
+ V(Arm64Float64Max) \
+ V(Arm64Float64Min) \
V(Arm64LdrS) \
V(Arm64StrS) \
V(Arm64LdrD) \
@@ -124,13 +132,15 @@ namespace compiler {
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
-#define TARGET_ADDRESSING_MODE_LIST(V) \
- V(MRI) /* [%r0 + K] */ \
- V(MRR) /* [%r0 + %r1] */ \
- V(Operand2_R_LSL_I) /* %r0 LSL K */ \
- V(Operand2_R_LSR_I) /* %r0 LSR K */ \
- V(Operand2_R_ASR_I) /* %r0 ASR K */ \
- V(Operand2_R_ROR_I) /* %r0 ROR K */
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Operand2_R_LSL_I) /* %r0 LSL K */ \
+ V(Operand2_R_LSR_I) /* %r0 LSR K */ \
+ V(Operand2_R_ASR_I) /* %r0 ASR K */ \
+ V(Operand2_R_ROR_I) /* %r0 ROR K */ \
+ V(Operand2_R_UXTB) /* %r0 UXTB (unsigned extend byte) */ \
+ V(Operand2_R_UXTH) /* %r0 UXTH (unsigned extend halfword) */
} // namespace internal
} // namespace compiler
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index 6afd3e8c12..427486b25a 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -167,6 +167,25 @@ static bool TryMatchAnyShift(InstructionSelector* selector, Node* node,
}
+static bool TryMatchAnyExtend(InstructionSelector* selector, Node* node,
+ InstructionCode* opcode) {
+ NodeMatcher nm(node);
+ if (nm.IsWord32And()) {
+ Int32BinopMatcher m(node);
+ if (m.right().HasValue()) {
+ if (m.right().Value() == 0xff) {
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_UXTB);
+ return true;
+ } else if (m.right().Value() == 0xffff) {
+ *opcode |= AddressingModeField::encode(kMode_Operand2_R_UXTH);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+
// Shared routine for multiple binary operations.
template <typename Matcher>
static void VisitBinop(InstructionSelector* selector, Node* node,
@@ -178,28 +197,38 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
size_t input_count = 0;
InstructionOperand outputs[2];
size_t output_count = 0;
- bool try_ror_operand = true;
+ bool is_add_sub = false;
if (m.IsInt32Add() || m.IsInt64Add() || m.IsInt32Sub() || m.IsInt64Sub()) {
- try_ror_operand = false;
+ is_add_sub = true;
}
if (g.CanBeImmediate(m.right().node(), operand_mode)) {
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseImmediate(m.right().node());
} else if (TryMatchAnyShift(selector, m.right().node(), &opcode,
- try_ror_operand)) {
+ !is_add_sub)) {
Matcher m_shift(m.right().node());
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
} else if (m.HasProperty(Operator::kCommutative) &&
TryMatchAnyShift(selector, m.left().node(), &opcode,
- try_ror_operand)) {
+ !is_add_sub)) {
Matcher m_shift(m.left().node());
inputs[input_count++] = g.UseRegister(m.right().node());
inputs[input_count++] = g.UseRegister(m_shift.left().node());
inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+ } else if (is_add_sub &&
+ TryMatchAnyExtend(selector, m.right().node(), &opcode)) {
+ Matcher mright(m.right().node());
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseRegister(mright.left().node());
+ } else if (is_add_sub && m.HasProperty(Operator::kCommutative) &&
+ TryMatchAnyExtend(selector, m.left().node(), &opcode)) {
+ Matcher mleft(m.left().node());
+ inputs[input_count++] = g.UseRegister(m.right().node());
+ inputs[input_count++] = g.UseRegister(mleft.left().node());
} else {
inputs[input_count++] = g.UseRegister(m.left().node());
inputs[input_count++] = g.UseRegister(m.right().node());
@@ -220,9 +249,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -709,6 +737,12 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1033,6 +1067,20 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ Arm64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kArm64Float64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRRFloat64(this, kArm64Float64Sub, node);
}
@@ -1055,18 +1103,31 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRRFloat64(this, kArm64Float64Sqrt, node);
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kArm64Float64Max, g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRRFloat64(this, kArm64Float64Floor, node);
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kArm64Float64Min, g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRRFloat64(this, kArm64Float64Sqrt, node);
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRRFloat64(this, kArm64Float64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kArm64Float64RoundDown, node);
}
@@ -1080,7 +1141,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Arm64OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
@@ -1107,7 +1168,7 @@ void InstructionSelector::VisitCall(Node* node) {
if (aligned_push_count > 0) {
// TODO(dcarney): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
- Emit(kArm64Claim | MiscField::encode(aligned_push_count), g.NoOutput());
+ Emit(kArm64Claim, g.NoOutput(), g.TempImmediate(aligned_push_count));
}
// Move arguments to the stack.
{
@@ -1115,18 +1176,26 @@ void InstructionSelector::VisitCall(Node* node) {
// Emit the uneven pushes.
if (pushed_count_uneven) {
Node* input = buffer.pushed_nodes[slot];
- Emit(kArm64Poke | MiscField::encode(slot), g.NoOutput(),
- g.UseRegister(input));
+ Emit(kArm64Poke, g.NoOutput(), g.UseRegister(input),
+ g.TempImmediate(slot));
slot--;
}
// Now all pushes can be done in pairs.
for (; slot >= 0; slot -= 2) {
- Emit(kArm64PokePair | MiscField::encode(slot), g.NoOutput(),
+ Emit(kArm64PokePair, g.NoOutput(),
g.UseRegister(buffer.pushed_nodes[slot]),
- g.UseRegister(buffer.pushed_nodes[slot - 1]));
+ g.UseRegister(buffer.pushed_nodes[slot - 1]),
+ g.TempImmediate(slot));
}
}
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler != nullptr) {
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -1141,7 +1210,7 @@ void InstructionSelector::VisitCall(Node* node) {
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(flags);
// Emit the call instruction.
InstructionOperand* first_output =
@@ -1161,8 +1230,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1243,25 +1311,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value)) {
- if (value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- } else if (value->opcode() == IrOpcode::kWord64Equal) {
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
+ while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
} else {
break;
}
@@ -1354,8 +1409,7 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros32(m.right().Value())),
- g.Label(cont.true_block()),
- g.Label(cont.false_block()))->MarkAsControl();
+ g.Label(cont.true_block()), g.Label(cont.false_block()));
return;
}
return VisitWordCompare(this, value, kArm64Tst32, &cont, true,
@@ -1372,8 +1426,7 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
g.UseRegister(m.left().node()),
g.TempImmediate(
base::bits::CountTrailingZeros64(m.right().Value())),
- g.Label(cont.true_block()),
- g.Label(cont.false_block()))->MarkAsControl();
+ g.Label(cont.true_block()), g.Label(cont.false_block()));
return;
}
return VisitWordCompare(this, value, kArm64Tst, &cont, true,
@@ -1387,68 +1440,35 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
// Branch could not be combined with a compare, compare against 0 and branch.
Emit(cont.Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont.true_block()),
- g.Label(cont.false_block()))->MarkAsControl();
+ g.Label(cont.false_block()));
}
-void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
- BasicBlock** case_branches,
- int32_t* case_values, size_t case_count,
- int32_t min_value, int32_t max_value) {
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Arm64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- InstructionOperand default_operand = g.Label(default_branch);
-
- // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
- // is 2^31-1, so don't assume that it's non-zero below.
- size_t value_range =
- 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
- // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
- // instruction.
- size_t table_space_cost = 4 + value_range;
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * case_count;
- size_t lookup_time_cost = case_count;
- if (case_count > 0 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = value_operand;
- if (min_value) {
+ if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kArm64Sub32, index_operand, value_operand,
- g.TempImmediate(min_value));
+ g.TempImmediate(sw.min_value));
}
- size_t input_count = 2 + value_range;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = index_operand;
- std::fill(&inputs[1], &inputs[input_count], default_operand);
- for (size_t index = 0; index < case_count; ++index) {
- size_t value = case_values[index] - min_value;
- BasicBlock* branch = case_branches[index];
- DCHECK_LE(0u, value);
- DCHECK_LT(value + 2, input_count);
- inputs[value + 2] = g.Label(branch);
- }
- Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
- return;
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
- size_t input_count = 2 + case_count * 2;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = value_operand;
- inputs[1] = default_operand;
- for (size_t index = 0; index < case_count; ++index) {
- int32_t value = case_values[index];
- BasicBlock* branch = case_branches[index];
- inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
- inputs[index * 2 + 2 + 1] = g.Label(branch);
- }
- Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
+ return EmitLookupSwitch(sw, value_operand);
}
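
For reference, the table-vs-lookup choice above is a pure cost comparison. Below is a minimal standalone sketch of that heuristic using the same cost formulas as the patch; the helper name and the driver are illustrative only, not part of the patch:

#include <cstdint>
#include <cstdio>
#include <limits>

// Mirrors the cost model in VisitSwitch: a jump table costs space
// proportional to the value range, while a compare chain costs space
// and time proportional to the case count.
static bool UseTableSwitch(size_t case_count, size_t value_range,
                           int32_t min_value) {
  size_t table_space_cost = 4 + value_range;
  size_t table_time_cost = 3;
  size_t lookup_space_cost = 3 + 2 * case_count;
  size_t lookup_time_cost = case_count;
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost &&
         min_value > std::numeric_limits<int32_t>::min();
}

int main() {
  // Dense cases (0..7) favor a table; 3 sparse cases spread over a
  // range of 1000 favor a compare chain.
  std::printf("%d\n", UseTableSwitch(8, 8, 0));     // prints 1
  std::printf("%d\n", UseTableSwitch(3, 1000, 0));  // prints 0
}
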
@@ -1582,13 +1602,63 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+ CanCover(node, left)) {
+ Node* right_of_left = left->InputAt(1);
+ Emit(kArm64Bfi, g.DefineSameAsFirst(right), g.UseRegister(right),
+ g.UseRegister(right_of_left), g.TempImmediate(32),
+ g.TempImmediate(32));
+ Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(right));
+ return;
+ }
+ Emit(kArm64Float64InsertLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Arm64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+ CanCover(node, left)) {
+ Node* right_of_left = left->InputAt(1);
+ Emit(kArm64Bfi, g.DefineSameAsFirst(left), g.UseRegister(right_of_left),
+ g.UseRegister(right), g.TempImmediate(32), g.TempImmediate(32));
+ Emit(kArm64Float64MoveU64, g.DefineAsRegister(node), g.UseRegister(left));
+ return;
+ }
+ Emit(kArm64Float64InsertHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
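
The Bfi fusion above packs two 32-bit words into one 64-bit pattern and then moves it into a floating-point register. A portable sketch of what the fused kArm64Bfi + kArm64Float64MoveU64 sequence computes (not the emitted ARM64 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Combine a low and a high 32-bit word into one 64-bit pattern and
// reinterpret it as a double.
static double CombineWords(uint32_t low, uint32_t high) {
  uint64_t bits = (static_cast<uint64_t>(high) << 32) | low;
  double result;
  std::memcpy(&result, &bits, sizeof(result));  // bit cast, no conversion
  return result;
}

int main() {
  // 1.0 has the IEEE-754 bit pattern 0x3FF0000000000000.
  std::printf("%f\n", CombineWords(0x00000000u, 0x3FF00000u));  // 1.000000
}
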
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway |
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe;
diff --git a/deps/v8/src/compiler/arm64/linkage-arm64.cc b/deps/v8/src/compiler/arm64/linkage-arm64.cc
index 57945fd77f..745eb5cde6 100644
--- a/deps/v8/src/compiler/arm64/linkage-arm64.cc
+++ b/deps/v8/src/compiler/arm64/linkage-arm64.cc
@@ -51,9 +51,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties);
+ stack_parameter_count, flags, properties,
+ return_type);
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index f5a2e0fcad..e550f51128 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -7,11 +7,14 @@
#include "src/compiler.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/control-builders.h"
+#include "src/compiler/js-type-feedback.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/liveness-analyzer.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
+#include "src/compiler/state-values-utils.h"
#include "src/full-codegen.h"
#include "src/parser.h"
#include "src/scopes.h"
@@ -140,10 +143,10 @@ class AstGraphBuilder::ContextScope BASE_EMBEDDED {
// - TryFinallyStatement: Intercepts 'break', 'continue', 'throw' and 'return'.
class AstGraphBuilder::ControlScope BASE_EMBEDDED {
public:
- ControlScope(AstGraphBuilder* builder, int stack_delta)
+ explicit ControlScope(AstGraphBuilder* builder)
: builder_(builder),
outer_(builder->execution_control()),
- stack_delta_(stack_delta) {
+ stack_height_(builder->environment()->stack_height()) {
builder_->set_execution_control(this); // Push.
}
@@ -190,12 +193,12 @@ class AstGraphBuilder::ControlScope BASE_EMBEDDED {
Environment* environment() { return builder_->environment(); }
AstGraphBuilder* builder() const { return builder_; }
- int stack_delta() const { return stack_delta_; }
+ int stack_height() const { return stack_height_; }
private:
AstGraphBuilder* builder_;
ControlScope* outer_;
- int stack_delta_;
+ int stack_height_;
};
@@ -212,7 +215,6 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
struct Entry {
Command command; // The command type being applied on this path.
Statement* statement; // The target statement for the command or {NULL}.
- Node* value; // The passed value node for the command or {NULL}.
Node* token; // A token identifying this particular path.
};
@@ -220,7 +222,7 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
// generates a new dispatch token that identifies one particular path.
Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
Node* token = NewPathTokenForDeferredCommand();
- deferred_.push_back({cmd, stmt, value, token});
+ deferred_.push_back({cmd, stmt, token});
return token;
}
@@ -230,7 +232,7 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
// Applies all recorded control-flow commands after the finally-block again.
// This generates a dynamic dispatch on the token from the entry point.
- void ApplyDeferredCommands(Node* token) {
+ void ApplyDeferredCommands(Node* token, Node* value) {
SwitchBuilder dispatch(owner_, static_cast<int>(deferred_.size()));
dispatch.BeginSwitch();
for (size_t i = 0; i < deferred_.size(); ++i) {
@@ -241,7 +243,7 @@ class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
for (size_t i = 0; i < deferred_.size(); ++i) {
dispatch.BeginCase(static_cast<int>(i));
owner_->execution_control()->PerformCommand(
- deferred_[i].command, deferred_[i].statement, deferred_[i].value);
+ deferred_[i].command, deferred_[i].statement, value);
dispatch.EndCase();
}
dispatch.EndSwitch();
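
The deferred-commands machinery above gives every control-flow path into a finally-block a fresh token and re-dispatches on that token afterwards. A minimal sketch of the idea, with illustrative types standing in for the real builder classes:

#include <cstdio>
#include <vector>

// Each path into the finally-block records a command under a fresh
// token; after the finally-block, a switch on the token resumes the
// recorded command.
enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_THROW };

struct Deferred {
  std::vector<Command> commands;
  int Record(Command cmd) {  // returns the dispatch token
    commands.push_back(cmd);
    return static_cast<int>(commands.size()) - 1;
  }
  void Apply(int token) {  // re-performs the recorded command
    switch (commands[static_cast<size_t>(token)]) {
      case CMD_BREAK:    std::puts("break");    break;
      case CMD_CONTINUE: std::puts("continue"); break;
      case CMD_RETURN:   std::puts("return");   break;
      case CMD_THROW:    std::puts("throw");    break;
    }
  }
};

int main() {
  Deferred deferred;
  int token = deferred.Record(CMD_RETURN);  // leaving try via 'return'
  // ... finally-block statements run here ...
  deferred.Apply(token);  // prints "return"
}
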
@@ -271,7 +273,7 @@ class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
public:
ControlScopeForBreakable(AstGraphBuilder* owner, BreakableStatement* target,
ControlBuilder* control)
- : ControlScope(owner, 0), target_(target), control_(control) {}
+ : ControlScope(owner), target_(target), control_(control) {}
protected:
virtual bool Execute(Command cmd, Statement* target, Node* value) OVERRIDE {
@@ -298,8 +300,8 @@ class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
public:
ControlScopeForIteration(AstGraphBuilder* owner, IterationStatement* target,
- LoopBuilder* control, int stack_delta)
- : ControlScope(owner, stack_delta), target_(target), control_(control) {}
+ LoopBuilder* control)
+ : ControlScope(owner), target_(target), control_(control) {}
protected:
virtual bool Execute(Command cmd, Statement* target, Node* value) OVERRIDE {
@@ -328,7 +330,12 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
public:
ControlScopeForCatch(AstGraphBuilder* owner, TryCatchBuilder* control)
- : ControlScope(owner, 0), control_(control) {}
+ : ControlScope(owner), control_(control) {
+ builder()->try_nesting_level_++; // Increment nesting.
+ }
+ ~ControlScopeForCatch() {
+ builder()->try_nesting_level_--; // Decrement nesting.
+ }
protected:
virtual bool Execute(Command cmd, Statement* target, Node* value) OVERRIDE {
@@ -354,12 +361,17 @@ class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
public:
ControlScopeForFinally(AstGraphBuilder* owner, DeferredCommands* commands,
TryFinallyBuilder* control)
- : ControlScope(owner, 0), commands_(commands), control_(control) {}
+ : ControlScope(owner), commands_(commands), control_(control) {
+ builder()->try_nesting_level_++; // Increment nesting.
+ }
+ ~ControlScopeForFinally() {
+ builder()->try_nesting_level_--; // Decrement nesting.
+ }
protected:
virtual bool Execute(Command cmd, Statement* target, Node* value) OVERRIDE {
Node* token = commands_->RecordCommand(cmd, target, value);
- control_->LeaveTry(token);
+ control_->LeaveTry(token, value);
return true;
}
@@ -370,7 +382,8 @@ class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, LoopAssignmentAnalysis* loop)
+ JSGraph* jsgraph, LoopAssignmentAnalysis* loop,
+ JSTypeFeedbackTable* js_type_feedback)
: local_zone_(local_zone),
info_(info),
jsgraph_(jsgraph),
@@ -379,10 +392,15 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
globals_(0, local_zone),
execution_control_(nullptr),
execution_context_(nullptr),
+ try_nesting_level_(0),
input_buffer_size_(0),
input_buffer_(nullptr),
exit_control_(nullptr),
- loop_assignment_analysis_(loop) {
+ loop_assignment_analysis_(loop),
+ state_values_cache_(jsgraph),
+ liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
+ local_zone),
+ js_type_feedback_(js_type_feedback) {
InitializeAstVisitor(info->isolate(), local_zone);
}
@@ -420,7 +438,7 @@ Node* AstGraphBuilder::NewCurrentContextOsrValue() {
}
-bool AstGraphBuilder::CreateGraph(bool constant_context) {
+bool AstGraphBuilder::CreateGraph(bool constant_context, bool stack_check) {
Scope* scope = info()->scope();
DCHECK(graph() != NULL);
@@ -428,13 +446,13 @@ bool AstGraphBuilder::CreateGraph(bool constant_context) {
int parameter_count = info()->num_parameters();
graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
- // Initialize control scope.
- ControlScope control(this, 0);
-
// Initialize the top-level environment.
Environment env(this, scope, graph()->start());
set_environment(&env);
+ // Initialize control scope.
+ ControlScope control(this);
+
if (info()->is_osr()) {
// Use OSR normal entry as the start of the top-level environment.
// It will be replaced with {Dead} after typing and optimizations.
@@ -459,21 +477,25 @@ bool AstGraphBuilder::CreateGraph(bool constant_context) {
Node* inner_context =
BuildLocalFunctionContext(function_context_.get(), closure);
ContextScope top_context(this, scope, inner_context);
- CreateGraphBody();
+ CreateGraphBody(stack_check);
} else {
// Simply use the outer function context in building the graph.
- CreateGraphBody();
+ CreateGraphBody(stack_check);
}
// Finish the basic structure of the graph.
graph()->SetEnd(graph()->NewNode(common()->End(), exit_control()));
+ // Compute local variable liveness information and use it to relax
+ // frame states.
+ ClearNonLiveSlotsInFrameStates();
+
// Failures indicated by stack overflow.
return !HasStackOverflow();
}
-void AstGraphBuilder::CreateGraphBody() {
+void AstGraphBuilder::CreateGraphBody(bool stack_check) {
Scope* scope = info()->scope();
// Build the arguments object if it is used.
@@ -498,8 +520,10 @@ void AstGraphBuilder::CreateGraphBody() {
VisitDeclarations(scope->declarations());
// Build a stack-check before the body.
- Node* node = BuildStackCheck();
- PrepareFrameState(node, BailoutId::FunctionEntry());
+ if (stack_check) {
+ Node* node = NewNode(javascript()->StackCheck());
+ PrepareFrameState(node, BailoutId::FunctionEntry());
+ }
// Visit statements in the function body.
VisitStatements(info()->function()->body());
@@ -516,6 +540,24 @@ void AstGraphBuilder::CreateGraphBody() {
}
+void AstGraphBuilder::ClearNonLiveSlotsInFrameStates() {
+ if (!FLAG_analyze_environment_liveness) return;
+
+ NonLiveFrameStateSlotReplacer replacer(
+ &state_values_cache_, jsgraph()->UndefinedConstant(),
+ liveness_analyzer()->local_count(), local_zone());
+ Variable* arguments = info()->scope()->arguments();
+ if (arguments != nullptr && arguments->IsStackAllocated()) {
+ replacer.MarkPermanentlyLive(arguments->index());
+ }
+ liveness_analyzer()->Run(&replacer);
+ if (FLAG_trace_environment_liveness) {
+ OFStream os(stdout);
+ liveness_analyzer()->Print(os);
+ }
+}
+
+
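
ClearNonLiveSlotsInFrameStates runs the liveness analyzer over the recorded checkpoints and rewrites dead local slots to the undefined constant. A toy sketch of that replacement step, with integers standing in for graph nodes:

#include <cstdio>
#include <vector>

// Slots that liveness analysis proved dead at a checkpoint are
// overwritten with one shared "undefined" value, shrinking the set of
// distinct frame-state inputs.
static void ClearNonLiveSlots(std::vector<int>* slots,
                              const std::vector<bool>& live,
                              int undefined_constant) {
  for (size_t i = 0; i < slots->size(); ++i) {
    if (!live[i]) (*slots)[i] = undefined_constant;
  }
}

int main() {
  std::vector<int> slots = {10, 20, 30};
  ClearNonLiveSlots(&slots, {true, false, true}, -1);
  std::printf("%d %d %d\n", slots[0], slots[1], slots[2]);  // 10 -1 30
}
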
// Left-hand side can only be a property, a global or a variable slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
@@ -538,6 +580,7 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
: builder_(builder),
parameters_count_(scope->num_parameters() + 1),
locals_count_(scope->num_stack_slots()),
+ liveness_block_(builder_->liveness_analyzer()->NewBlock()),
values_(builder_->local_zone()),
contexts_(builder_->local_zone()),
control_dependency_(control_dependency),
@@ -566,8 +609,7 @@ AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
}
-AstGraphBuilder::Environment::Environment(
- const AstGraphBuilder::Environment* copy)
+AstGraphBuilder::Environment::Environment(AstGraphBuilder::Environment* copy)
: builder_(copy->builder_),
parameters_count_(copy->parameters_count_),
locals_count_(copy->locals_count_),
@@ -584,13 +626,72 @@ AstGraphBuilder::Environment::Environment(
contexts_.reserve(copy->contexts_.size());
contexts_.insert(contexts_.begin(), copy->contexts_.begin(),
copy->contexts_.end());
+
+ if (FLAG_analyze_environment_liveness) {
+ // Split the liveness blocks.
+ copy->liveness_block_ =
+ builder_->liveness_analyzer()->NewBlock(copy->liveness_block());
+ liveness_block_ =
+ builder_->liveness_analyzer()->NewBlock(copy->liveness_block());
+ }
+}
+
+
+void AstGraphBuilder::Environment::Bind(Variable* variable, Node* node) {
+ DCHECK(variable->IsStackAllocated());
+ if (variable->IsParameter()) {
+ // The parameter indices are shifted by 1 (receiver is parameter
+ // index -1 but environment index 0).
+ values()->at(variable->index() + 1) = node;
+ } else {
+ DCHECK(variable->IsStackLocal());
+ values()->at(variable->index() + parameters_count_) = node;
+ if (FLAG_analyze_environment_liveness) {
+ liveness_block()->Bind(variable->index());
+ }
+ }
+}
+
+
+Node* AstGraphBuilder::Environment::Lookup(Variable* variable) {
+ DCHECK(variable->IsStackAllocated());
+ if (variable->IsParameter()) {
+ // The parameter indices are shifted by 1 (receiver is parameter
+ // index -1 but environment index 0).
+ return values()->at(variable->index() + 1);
+ } else {
+ DCHECK(variable->IsStackLocal());
+ if (FLAG_analyze_environment_liveness) {
+ liveness_block()->Lookup(variable->index());
+ }
+ return values()->at(variable->index() + parameters_count_);
+ }
+}
+
+
+void AstGraphBuilder::Environment::MarkAllLocalsLive() {
+ if (FLAG_analyze_environment_liveness) {
+ for (int i = 0; i < locals_count_; i++) {
+ liveness_block()->Lookup(i);
+ }
+ }
+}
+
+
+AstGraphBuilder::Environment*
+AstGraphBuilder::Environment::CopyAndShareLiveness() {
+ Environment* env = new (zone()) Environment(this);
+ if (FLAG_analyze_environment_liveness) {
+ env->liveness_block_ = liveness_block();
+ }
+ return env;
}
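
Bind and Lookup above share one index mapping into the flat values vector: the receiver is parameter index -1 but environment index 0, and stack locals start after all parameters. A small sketch of that arithmetic:

#include <cassert>

// Index mapping used by Environment::Bind/Lookup: parameters are
// shifted by 1 (receiver is parameter index -1 but environment index
// 0), and stack locals follow all parameters.
static int EnvironmentIndex(bool is_parameter, int index,
                            int parameters_count) {
  return is_parameter ? index + 1 : index + parameters_count;
}

int main() {
  const int parameters_count = 3;  // receiver + 2 declared parameters
  assert(EnvironmentIndex(true, -1, parameters_count) == 0);  // receiver
  assert(EnvironmentIndex(true, 1, parameters_count) == 2);   // 2nd param
  assert(EnvironmentIndex(false, 0, parameters_count) == 3);  // 1st local
}
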
void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
int offset, int count) {
bool should_update = false;
- Node** env_values = (count == 0) ? NULL : &values()->at(offset);
+ Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
if (*state_values == NULL || (*state_values)->InputCount() != count) {
should_update = true;
} else {
@@ -609,18 +710,32 @@ void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
+void AstGraphBuilder::Environment::UpdateStateValuesWithCache(
+ Node** state_values, int offset, int count) {
+ Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+ *state_values = builder_->state_values_cache_.GetNodeForValues(
+ env_values, static_cast<size_t>(count));
+}
+
+
Node* AstGraphBuilder::Environment::Checkpoint(
BailoutId ast_id, OutputFrameStateCombine combine) {
+ if (!FLAG_turbo_deoptimization) return nullptr;
+
UpdateStateValues(&parameters_node_, 0, parameters_count());
- UpdateStateValues(&locals_node_, parameters_count(), locals_count());
+ UpdateStateValuesWithCache(&locals_node_, parameters_count(), locals_count());
UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
stack_height());
const Operator* op = common()->FrameState(JS_FRAME, ast_id, combine);
- return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_,
- builder()->current_context(),
- builder()->jsgraph()->UndefinedConstant());
+ Node* result = graph()->NewNode(op, parameters_node_, locals_node_,
+ stack_node_, builder()->current_context(),
+ builder()->jsgraph()->UndefinedConstant());
+ if (FLAG_analyze_environment_liveness) {
+ liveness_block()->Checkpoint(result);
+ }
+ return result;
}
@@ -698,8 +813,8 @@ void AstGraphBuilder::ControlScope::PerformCommand(Command command,
Environment* env = environment()->CopyAsUnreachable();
ControlScope* current = this;
while (current != NULL) {
+ environment()->Trim(current->stack_height());
if (current->Execute(command, target, value)) break;
- environment()->Drop(current->stack_delta());
current = current->outer_;
}
builder()->set_environment(env);
@@ -708,12 +823,12 @@ void AstGraphBuilder::ControlScope::PerformCommand(Command command,
void AstGraphBuilder::ControlScope::BreakTo(BreakableStatement* stmt) {
- PerformCommand(CMD_BREAK, stmt, nullptr);
+ PerformCommand(CMD_BREAK, stmt, builder()->jsgraph()->TheHoleConstant());
}
void AstGraphBuilder::ControlScope::ContinueTo(BreakableStatement* stmt) {
- PerformCommand(CMD_CONTINUE, stmt, nullptr);
+ PerformCommand(CMD_CONTINUE, stmt, builder()->jsgraph()->TheHoleConstant());
}
@@ -1008,7 +1123,7 @@ void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
LoopBuilder while_loop(this);
while_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
- VisitIterationBody(stmt, &while_loop, 0);
+ VisitIterationBody(stmt, &while_loop);
while_loop.EndBody();
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
@@ -1023,7 +1138,7 @@ void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
VisitForTest(stmt->cond());
Node* condition = environment()->Pop();
while_loop.BreakUnless(condition);
- VisitIterationBody(stmt, &while_loop, 0);
+ VisitIterationBody(stmt, &while_loop);
while_loop.EndBody();
while_loop.EndLoop();
}
@@ -1040,7 +1155,7 @@ void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
} else {
for_loop.BreakUnless(jsgraph()->TrueConstant());
}
- VisitIterationBody(stmt, &for_loop, 0);
+ VisitIterationBody(stmt, &for_loop);
for_loop.EndBody();
VisitIfNotNull(stmt->next());
for_loop.EndLoop();
@@ -1165,7 +1280,9 @@ void AstGraphBuilder::VisitForInBody(ForInStatement* stmt) {
Node* index_inc =
NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
// TODO(jarin): provide real bailout id.
- PrepareFrameState(index_inc, BailoutId::None());
+ PrepareFrameStateAfterAndBefore(index_inc, BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ jsgraph()->EmptyFrameState());
environment()->Poke(0, index_inc);
for_loop.Continue();
is_property_missing.Else();
@@ -1179,13 +1296,17 @@ void AstGraphBuilder::VisitForInBody(ForInStatement* stmt) {
value = environment()->Pop();
// Bind value and do loop body.
VisitForInAssignment(stmt->each(), value, stmt->AssignmentId());
- VisitIterationBody(stmt, &for_loop, 5);
+ VisitIterationBody(stmt, &for_loop);
+ index = environment()->Peek(0);
for_loop.EndBody();
+
// Inc counter and continue.
Node* index_inc =
NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
// TODO(jarin): provide real bailout id.
- PrepareFrameState(index_inc, BailoutId::None());
+ PrepareFrameStateAfterAndBefore(index_inc, BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ jsgraph()->EmptyFrameState());
environment()->Poke(0, index_inc);
for_loop.EndLoop();
environment()->Drop(5);
@@ -1202,7 +1323,7 @@ void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
Node* condition = environment()->Pop();
for_loop.BreakWhen(condition);
VisitForEffect(stmt->assign_each());
- VisitIterationBody(stmt, &for_loop, 0);
+ VisitIterationBody(stmt, &for_loop);
for_loop.EndBody();
for_loop.EndLoop();
}
@@ -1216,7 +1337,10 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
try_control.BeginTry();
{
ControlScopeForCatch scope(this, &try_control);
+ STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
+ environment()->Push(current_context());
Visit(stmt->try_block());
+ environment()->Pop();
}
try_control.EndTry();
@@ -1242,6 +1366,9 @@ void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
TryFinallyBuilder try_control(this);
+ ExternalReference message_object =
+ ExternalReference::address_of_pending_message_obj(isolate());
+
// We keep a record of all paths that enter the finally-block to be able to
// dispatch to the correct continuation point after the statements in the
// finally-block have been evaluated.
@@ -1251,6 +1378,7 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// 2. By exiting the try-block with a function-local control flow transfer
// (i.e. through break/continue/return statements).
// 3. By exiting the try-block with a thrown exception.
+ Node* fallthrough_result = jsgraph()->TheHoleConstant();
ControlScope::DeferredCommands* commands =
new (zone()) ControlScope::DeferredCommands(this);
@@ -1259,17 +1387,41 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
try_control.BeginTry();
{
ControlScopeForFinally scope(this, commands, &try_control);
+ STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
+ environment()->Push(current_context());
Visit(stmt->try_block());
+ environment()->Pop();
}
- try_control.EndTry(commands->GetFallThroughToken());
+ try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
+
+ // The result value semantics depend on how the block was entered:
+  // - ReturnStatement: It represents the value being returned.
+ // - ThrowStatement: It represents the exception being thrown.
+ // - BreakStatement/ContinueStatement: Filled with the hole.
+ // - Falling through into finally-block: Filled with the hole.
+ Node* result = try_control.GetResultValueNode();
+ Node* token = try_control.GetDispatchTokenNode();
+
+  // The result value, dispatch token and message are expected on the operand
+  // stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
+ Node* message = BuildLoadExternal(message_object, kMachAnyTagged);
+ environment()->Push(token); // TODO(mstarzinger): Cook token!
+ environment()->Push(result);
+ environment()->Push(message);
// Evaluate the finally-block.
Visit(stmt->finally_block());
try_control.EndFinally();
+  // The result value, dispatch token and message are restored from the operand
+  // stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
+ message = environment()->Pop();
+ result = environment()->Pop();
+ token = environment()->Pop(); // TODO(mstarzinger): Uncook token!
+ BuildStoreExternal(message_object, kMachAnyTagged, message);
+
// Dynamic dispatch after the finally-block.
- Node* token = try_control.GetDispatchTokenNode();
- commands->ApplyDeferredCommands(token);
+ commands->ApplyDeferredCommands(token, result);
// TODO(mstarzinger): Remove bailout once everything works.
if (!FLAG_turbo_exceptions) SetStackOverflow();
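
The code above threads the dispatch token, the result value and the pending message through the operand stack around the finally-block, pushing before and popping in reverse order after. A sketch of that stack discipline:

#include <cassert>
#include <stack>

// Push token, result and pending message before visiting the
// finally-block; pop them in reverse order afterwards (mirroring
// EnterFinallyBlock/ExitFinallyBlock in the full codegen).
int main() {
  std::stack<int> operands;
  int token = 1, result = 2, message = 3;
  operands.push(token);
  operands.push(result);
  operands.push(message);
  // ... finally-block statements evaluate here, leaving the stack
  // balanced ...
  message = operands.top(); operands.pop();
  result = operands.top(); operands.pop();
  token = operands.top(); operands.pop();
  assert(token == 1 && result == 2 && message == 3);
}
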
@@ -1277,9 +1429,9 @@ void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- // TODO(turbofan): Do we really need a separate reloc-info for this?
Node* node = NewNode(javascript()->CallRuntime(Runtime::kDebugBreak, 0));
PrepareFrameState(node, stmt->DebugBreakId());
+ environment()->MarkAllLocalsLive();
}
@@ -1353,8 +1505,19 @@ void AstGraphBuilder::VisitClassLiteralContents(ClassLiteral* expr) {
environment()->Push(property->is_static() ? literal : proto);
VisitForValue(property->key());
- environment()->Push(
- BuildToName(environment()->Pop(), expr->GetIdForProperty(i)));
+ Node* name = BuildToName(environment()->Pop(), expr->GetIdForProperty(i));
+ environment()->Push(name);
+
+  // The static prototype property is read-only. We handle the non-computed
+  // property name case in the parser. Since this is the only case where we
+  // need to check for an own read-only property, we special-case it here so
+  // that we do not need to do the check for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ Node* check = BuildThrowIfStaticPrototype(environment()->Pop(),
+ expr->GetIdForProperty(i));
+ environment()->Push(check);
+ }
+
VisitForValue(property->value());
Node* value = environment()->Pop();
Node* key = environment()->Pop();
@@ -1502,10 +1665,9 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
if (property->emit_store()) {
VisitForValue(property->value());
Node* value = environment()->Pop();
- Unique<Name> name = MakeUnique(key->AsPropertyName());
+ Handle<Name> name = key->AsPropertyName();
Node* store =
- NewNode(javascript()->StoreNamed(language_mode(), name),
- literal, value);
+ BuildNamedStore(literal, name, value, TypeFeedbackId::None());
PrepareFrameState(store, key->id());
BuildSetHomeObject(value, literal, property->value());
} else {
@@ -1588,8 +1750,9 @@ void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
environment()->Push(literal); // Duplicate receiver.
VisitForValue(property->key());
- environment()->Push(BuildToName(environment()->Pop(),
- expr->GetIdForProperty(property_index)));
+ Node* name = BuildToName(environment()->Pop(),
+ expr->GetIdForProperty(property_index));
+ environment()->Push(name);
// TODO(mstarzinger): For ObjectLiteral::Property::PROTOTYPE the key should
// not be on the operand stack while the value is being evaluated. Come up
// with a repro for this and fix it. Also find a nice way to do so. :)
@@ -1675,11 +1838,15 @@ void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
VisitForValue(subexpr);
+ Node* frame_state_before = environment()->Checkpoint(
+ subexpr->id(), OutputFrameStateCombine::PokeAt(0));
Node* value = environment()->Pop();
Node* index = jsgraph()->Constant(i);
- Node* store = NewNode(javascript()->StoreProperty(language_mode()), literal,
- index, value);
- PrepareFrameState(store, expr->GetIdForElement(i));
+ Node* store =
+ BuildKeyedStore(literal, index, value, TypeFeedbackId::None());
+ PrepareFrameStateAfterAndBefore(store, expr->GetIdForElement(i),
+ OutputFrameStateCombine::Ignore(),
+ frame_state_before);
}
environment()->Pop(); // Array literal index.
@@ -1707,10 +1874,9 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
VisitForValue(property->obj());
Node* object = environment()->Pop();
value = environment()->Pop();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store = NewNode(javascript()->StoreNamed(language_mode(), name),
- object, value);
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store =
+ BuildNamedStore(object, name, value, TypeFeedbackId::None());
PrepareFrameState(store, bailout_id);
break;
}
@@ -1721,9 +1887,11 @@ void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
Node* key = environment()->Pop();
Node* object = environment()->Pop();
value = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(language_mode()),
- object, key, value);
- PrepareFrameState(store, bailout_id);
+ Node* store = BuildKeyedStore(object, key, value, TypeFeedbackId::None());
+ // TODO(jarin) Provide a real frame state before.
+ PrepareFrameStateAfterAndBefore(store, bailout_id,
+ OutputFrameStateCombine::Ignore(),
+ jsgraph()->EmptyFrameState());
break;
}
}
@@ -1754,6 +1922,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
// Evaluate the value and potentially handle compound assignments by loading
// the left-hand side value and performing a binary operation.
+ Node* frame_state_before_store = nullptr;
+ bool needs_frame_state_before = (assign_type == KEYED_PROPERTY);
if (expr->is_compound()) {
Node* old_value = NULL;
switch (assign_type) {
@@ -1766,11 +1936,11 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
}
case NAMED_PROPERTY: {
Node* object = environment()->Top();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = NewNode(javascript()->LoadNamed(name, pair), object);
+ old_value =
+ BuildNamedLoad(object, name, pair, property->PropertyFeedbackId());
PrepareFrameState(old_value, property->LoadId(),
OutputFrameStateCombine::Push());
break;
@@ -1780,7 +1950,8 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = NewNode(javascript()->LoadProperty(pair), object, key);
+ old_value =
+ BuildKeyedLoad(object, key, pair, property->PropertyFeedbackId());
PrepareFrameState(old_value, property->LoadId(),
OutputFrameStateCombine::Push());
break;
@@ -1788,14 +1959,29 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
}
environment()->Push(old_value);
VisitForValue(expr->value());
+ Node* frame_state_before = environment()->Checkpoint(expr->value()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = BuildBinaryOp(left, right, expr->binary_op());
- PrepareFrameState(value, expr->binary_operation()->id(),
- OutputFrameStateCombine::Push());
+ PrepareFrameStateAfterAndBefore(value, expr->binary_operation()->id(),
+ OutputFrameStateCombine::Push(),
+ frame_state_before);
environment()->Push(value);
+ if (needs_frame_state_before) {
+ frame_state_before_store = environment()->Checkpoint(
+ expr->binary_operation()->id(), OutputFrameStateCombine::PokeAt(0));
+ }
} else {
VisitForValue(expr->value());
+ if (needs_frame_state_before) {
+ // This frame state can be used for lazy-deopting from a to-number
+ // conversion if we are storing into a typed array. It is important
+ // that the frame state is usable for such lazy deopt (i.e., it has
+ // to specify how to override the value before the conversion, in this
+ // case, it overwrites the stack top).
+ frame_state_before_store = environment()->Checkpoint(
+ expr->value()->id(), OutputFrameStateCombine::PokeAt(0));
+ }
}
// Store the value.
@@ -1803,27 +1989,26 @@ void AstGraphBuilder::VisitAssignment(Assignment* expr) {
switch (assign_type) {
case VARIABLE: {
Variable* variable = expr->target()->AsVariableProxy()->var();
- BuildVariableAssignment(variable, value, expr->op(), expr->AssignmentId(),
+ BuildVariableAssignment(variable, value, expr->op(), expr->id(),
ast_context()->GetStateCombine());
break;
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store = NewNode(javascript()->StoreNamed(language_mode(), name),
- object, value);
- PrepareFrameState(store, expr->AssignmentId(),
- ast_context()->GetStateCombine());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store =
+ BuildNamedStore(object, name, value, expr->AssignmentFeedbackId());
+ PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
break;
}
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(language_mode()),
- object, key, value);
- PrepareFrameState(store, expr->AssignmentId(),
- ast_context()->GetStateCombine());
+ Node* store =
+ BuildKeyedStore(object, key, value, expr->AssignmentFeedbackId());
+ PrepareFrameStateAfterAndBefore(store, expr->id(),
+ ast_context()->GetStateCombine(),
+ frame_state_before_store);
break;
}
}
@@ -1842,16 +2027,8 @@ void AstGraphBuilder::VisitYield(Yield* expr) {
void AstGraphBuilder::VisitThrow(Throw* expr) {
VisitForValue(expr->exception());
Node* exception = environment()->Pop();
- if (FLAG_turbo_exceptions) {
- execution_control()->ThrowValue(exception);
- ast_context()->ProduceValue(exception);
- } else {
- // TODO(mstarzinger): Temporary workaround for bailout-id for debugger.
- const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
- Node* value = NewNode(op, exception);
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
- ast_context()->ProduceValue(value);
- }
+ Node* value = BuildThrowError(exception, expr->id());
+ ast_context()->ProduceValue(value);
}
@@ -1861,14 +2038,14 @@ void AstGraphBuilder::VisitProperty(Property* expr) {
if (expr->key()->IsPropertyName()) {
VisitForValue(expr->obj());
Node* object = environment()->Pop();
- Unique<Name> name = MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
- value = NewNode(javascript()->LoadNamed(name, pair), object);
+ Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
+ value = BuildNamedLoad(object, name, pair, expr->PropertyFeedbackId());
} else {
VisitForValue(expr->obj());
VisitForValue(expr->key());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- value = NewNode(javascript()->LoadProperty(pair), object, key);
+ value = BuildKeyedLoad(object, key, pair, expr->PropertyFeedbackId());
}
PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
ast_context()->ProduceValue(value);
@@ -1915,13 +2092,14 @@ void AstGraphBuilder::VisitCall(Call* expr) {
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
if (property->key()->IsPropertyName()) {
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- callee_value = NewNode(javascript()->LoadNamed(name, pair), object);
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ callee_value =
+ BuildNamedLoad(object, name, pair, property->PropertyFeedbackId());
} else {
VisitForValue(property->key());
Node* key = environment()->Pop();
- callee_value = NewNode(javascript()->LoadProperty(pair), object, key);
+ callee_value =
+ BuildKeyedLoad(object, key, pair, property->PropertyFeedbackId());
}
PrepareFrameState(callee_value, property->LoadId(),
OutputFrameStateCombine::Push());
@@ -2016,10 +2194,9 @@ void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
// before arguments are being evaluated.
CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
Node* receiver_value = BuildLoadBuiltinsObject();
- Unique<String> unique = MakeUnique(name);
VectorSlotPair pair = CreateVectorSlotPair(expr->CallRuntimeFeedbackSlot());
Node* callee_value =
- NewNode(javascript()->LoadNamed(unique, pair), receiver_value);
+ BuildNamedLoad(receiver_value, name, pair, expr->CallRuntimeFeedbackId());
// TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
// refuses to optimize functions with jsruntime calls).
PrepareFrameState(callee_value, BailoutId::None(),
@@ -2104,11 +2281,11 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
VisitForValue(property->obj());
Node* object = environment()->Top();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = NewNode(javascript()->LoadNamed(name, pair), object);
+ old_value =
+ BuildNamedLoad(object, name, pair, property->PropertyFeedbackId());
PrepareFrameState(old_value, property->LoadId(),
OutputFrameStateCombine::Push());
stack_depth = 1;
@@ -2121,7 +2298,8 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
Node* object = environment()->Peek(1);
VectorSlotPair pair =
CreateVectorSlotPair(property->PropertyFeedbackSlot());
- old_value = NewNode(javascript()->LoadProperty(pair), object, key);
+ old_value =
+ BuildKeyedLoad(object, key, pair, property->PropertyFeedbackId());
PrepareFrameState(old_value, property->LoadId(),
OutputFrameStateCombine::Push());
stack_depth = 2;
@@ -2134,15 +2312,22 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
PrepareFrameState(old_value, expr->ToNumberId(),
OutputFrameStateCombine::Push());
+ Node* frame_state_before_store =
+ assign_type == KEYED_PROPERTY
+ ? environment()->Checkpoint(expr->ToNumberId())
+ : nullptr;
+
// Save result for postfix expressions at correct stack depth.
if (is_postfix) environment()->Poke(stack_depth, old_value);
// Create node to perform +1/-1 operation.
Node* value =
BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
- // TODO(jarin) Insert proper bailout id here (will need to change
- // full code generator).
- PrepareFrameState(value, BailoutId::None());
+  // This should never deoptimize because we have already converted to a
+  // number above.
+ PrepareFrameStateAfterAndBefore(value, BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ jsgraph()->EmptyFrameState());
// Store the value.
switch (assign_type) {
@@ -2156,10 +2341,9 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
case NAMED_PROPERTY: {
Node* object = environment()->Pop();
- Unique<Name> name =
- MakeUnique(property->key()->AsLiteral()->AsPropertyName());
- Node* store = NewNode(javascript()->StoreNamed(language_mode(), name),
- object, value);
+ Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+ Node* store =
+ BuildNamedStore(object, name, value, expr->CountStoreFeedbackId());
environment()->Push(value);
PrepareFrameState(store, expr->AssignmentId());
environment()->Pop();
@@ -2168,10 +2352,12 @@ void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- Node* store = NewNode(javascript()->StoreProperty(language_mode()),
- object, key, value);
+ Node* store =
+ BuildKeyedStore(object, key, value, expr->CountStoreFeedbackId());
environment()->Push(value);
- PrepareFrameState(store, expr->AssignmentId());
+ PrepareFrameStateAfterAndBefore(store, expr->AssignmentId(),
+ OutputFrameStateCombine::Ignore(),
+ frame_state_before_store);
environment()->Pop();
break;
}
@@ -2194,10 +2380,13 @@ void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
default: {
VisitForValue(expr->left());
VisitForValue(expr->right());
+ Node* frame_state_before = environment()->Checkpoint(expr->right()->id());
Node* right = environment()->Pop();
Node* left = environment()->Pop();
Node* value = BuildBinaryOp(left, right, expr->op());
- PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+ PrepareFrameStateAfterAndBefore(value, expr->id(),
+ ast_context()->GetStateCombine(),
+ frame_state_before);
ast_context()->ProduceValue(value);
}
}
@@ -2293,8 +2482,8 @@ void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
- LoopBuilder* loop, int stack_delta) {
- ControlScopeForIteration scope(this, stmt, loop, stack_delta);
+ LoopBuilder* loop) {
+ ControlScopeForIteration scope(this, stmt, loop);
Visit(stmt->body());
}
@@ -2431,7 +2620,8 @@ Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
receiver_check.If(check);
receiver_check.Then();
- environment()->Push(BuildLoadGlobalProxy());
+ Node* proxy = BuildLoadGlobalProxy();
+ environment()->Push(proxy);
receiver_check.Else();
environment()->Push(receiver);
receiver_check.End();
@@ -2530,7 +2720,8 @@ Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
hole_check.If(check);
hole_check.Then();
- environment()->Push(BuildThrowReferenceError(variable, bailout_id));
+ Node* error = BuildThrowReferenceError(variable, bailout_id);
+ environment()->Push(error);
hole_check.Else();
environment()->Push(not_hole);
hole_check.End();
@@ -2538,6 +2729,28 @@ Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
}
+Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
+ BailoutId bailout_id) {
+ IfBuilder prototype_check(this);
+ Node* prototype_string =
+ jsgraph()->Constant(isolate()->factory()->prototype_string());
+ Node* check = NewNode(javascript()->StrictEqual(), name, prototype_string);
+ prototype_check.If(check);
+ prototype_check.Then();
+ {
+ const Operator* op =
+ javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError, 0);
+ Node* call = NewNode(op);
+ PrepareFrameState(call, bailout_id);
+ environment()->Push(call);
+ }
+ prototype_check.Else();
+ environment()->Push(name);
+ prototype_check.End();
+ return environment()->Pop();
+}
+
+
Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
BailoutId bailout_id,
const VectorSlotPair& feedback,
@@ -2548,10 +2761,9 @@ Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
case Variable::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
- Unique<Name> name = MakeUnique(variable->name());
- const Operator* op =
- javascript()->LoadNamed(name, feedback, contextual_mode);
- Node* node = NewNode(op, global);
+ Handle<Name> name = variable->name();
+ Node* node = BuildNamedLoad(global, name, feedback,
+ TypeFeedbackId::None(), contextual_mode);
PrepareFrameState(node, bailout_id, OutputFrameStateCombine::Push());
return node;
}
@@ -2657,9 +2869,9 @@ Node* AstGraphBuilder::BuildVariableAssignment(
case Variable::UNALLOCATED: {
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
- Unique<Name> name = MakeUnique(variable->name());
- const Operator* op = javascript()->StoreNamed(language_mode(), name);
- Node* store = NewNode(op, global, value);
+ Handle<Name> name = variable->name();
+ Node* store =
+ BuildNamedStore(global, name, value, TypeFeedbackId::None());
PrepareFrameState(store, bailout_id, combine);
return store;
}
@@ -2673,7 +2885,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
value = BuildHoleCheckSilent(current, value, current);
}
} else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
- // Non-initializing assignments to legacy const is
+ // Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
if (is_strict(language_mode())) {
@@ -2692,7 +2904,13 @@ Node* AstGraphBuilder::BuildVariableAssignment(
value = BuildHoleCheckThrow(current, variable, value, bailout_id);
}
} else if (mode == CONST && op != Token::INIT_CONST) {
- // Non-initializing assignments to const is exception in all modes.
+      // Assignment to const throws an exception in all modes.
+ Node* current = environment()->Lookup(variable);
+ if (current->op() == the_hole->op()) {
+ return BuildThrowReferenceError(variable, bailout_id);
+ } else if (value->opcode() == IrOpcode::kPhi) {
+ BuildHoleCheckThrow(current, variable, value, bailout_id);
+ }
return BuildThrowConstAssignError(bailout_id);
}
environment()->Bind(variable, value);
@@ -2707,7 +2925,7 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* current = NewNode(op, current_context());
value = BuildHoleCheckSilent(current, value, current);
} else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
- // Non-initializing assignments to legacy const is
+ // Non-initializing assignment to legacy const is
// - exception in strict mode.
// - ignored in sloppy mode.
if (is_strict(language_mode())) {
@@ -2721,7 +2939,11 @@ Node* AstGraphBuilder::BuildVariableAssignment(
Node* current = NewNode(op, current_context());
value = BuildHoleCheckThrow(current, variable, value, bailout_id);
} else if (mode == CONST && op != Token::INIT_CONST) {
- // Non-initializing assignments to const is exception in all modes.
+      // Assignment to const throws an exception in all modes.
+ const Operator* op =
+ javascript()->LoadContext(depth, variable->index(), false);
+ Node* current = NewNode(op, current_context());
+ BuildHoleCheckThrow(current, variable, value, bailout_id);
return BuildThrowConstAssignError(bailout_id);
}
const Operator* op = javascript()->StoreContext(depth, variable->index());
@@ -2745,10 +2967,48 @@ Node* AstGraphBuilder::BuildVariableAssignment(
}
+static inline Node* Record(JSTypeFeedbackTable* js_type_feedback, Node* node,
+ TypeFeedbackId id) {
+ if (js_type_feedback) js_type_feedback->Record(node, id);
+ return node;
+}
+
+
+Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
+ const VectorSlotPair& feedback,
+ TypeFeedbackId id) {
+ const Operator* op = javascript()->LoadProperty(feedback);
+ return Record(js_type_feedback_, NewNode(op, object, key), id);
+}
+
+
+Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
+ const VectorSlotPair& feedback,
+ TypeFeedbackId id, ContextualMode mode) {
+ const Operator* op =
+ javascript()->LoadNamed(MakeUnique(name), feedback, mode);
+ return Record(js_type_feedback_, NewNode(op, object), id);
+}
+
+
+Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
+ TypeFeedbackId id) {
+ const Operator* op = javascript()->StoreProperty(language_mode());
+ return Record(js_type_feedback_, NewNode(op, object, key, value), id);
+}
+
+
+Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
+ Node* value, TypeFeedbackId id) {
+ const Operator* op =
+ javascript()->StoreNamed(language_mode(), MakeUnique(name));
+ return Record(js_type_feedback_, NewNode(op, object, value), id);
+}
+
+
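
All four Build* helpers above funnel the freshly created node through Record(), which is a no-op when no feedback table was supplied. A minimal sketch of that pattern; the integer node ids are illustrative:

#include <cstdio>
#include <map>

// Optionally associate a freshly built node with a type-feedback id,
// then pass the node through unchanged.
struct FeedbackTable {
  std::map<int, int> node_to_id;
  void Record(int node, int id) { node_to_id[node] = id; }
};

static int Record(FeedbackTable* table, int node, int id) {
  if (table) table->Record(node, id);  // table may be null
  return node;
}

int main() {
  FeedbackTable table;
  int node = Record(&table, /*node=*/42, /*id=*/7);
  std::printf("%d %zu\n", node, table.node_to_id.size());  // 42 1
}
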
Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
- Node* field_load = NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
- jsgraph()->Int32Constant(offset - kHeapObjectTag));
- return field_load;
+ return NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+ jsgraph()->IntPtrConstant(offset - kHeapObjectTag));
}
@@ -2775,6 +3035,23 @@ Node* AstGraphBuilder::BuildLoadGlobalProxy() {
}
+Node* AstGraphBuilder::BuildLoadExternal(ExternalReference reference,
+ MachineType type) {
+ return NewNode(jsgraph()->machine()->Load(type),
+ jsgraph()->ExternalConstant(reference),
+ jsgraph()->IntPtrConstant(0));
+}
+
+
+Node* AstGraphBuilder::BuildStoreExternal(ExternalReference reference,
+ MachineType type, Node* value) {
+ StoreRepresentation representation(type, kNoWriteBarrier);
+ return NewNode(jsgraph()->machine()->Store(representation),
+ jsgraph()->ExternalConstant(reference),
+ jsgraph()->IntPtrConstant(0), value);
+}
+
+
Node* AstGraphBuilder::BuildToBoolean(Node* input) {
// TODO(titzer): This should be in a JSOperatorReducer.
switch (input->opcode()) {
@@ -2813,17 +3090,24 @@ Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
Node* AstGraphBuilder::BuildSetHomeObject(Node* value, Node* home_object,
Expression* expr) {
if (!FunctionLiteral::NeedsHomeObject(expr)) return value;
- Unique<Name> name = MakeUnique(isolate()->factory()->home_object_symbol());
- const Operator* op = javascript()->StoreNamed(language_mode(), name);
- Node* store = NewNode(op, value, home_object);
+ Handle<Name> name = isolate()->factory()->home_object_symbol();
+ Node* store =
+ BuildNamedStore(value, name, home_object, TypeFeedbackId::None());
PrepareFrameState(store, BailoutId::None());
return store;
}
+Node* AstGraphBuilder::BuildThrowError(Node* exception, BailoutId bailout_id) {
+ const Operator* op = javascript()->CallRuntime(Runtime::kThrow, 1);
+ Node* call = NewNode(op, exception);
+ PrepareFrameState(call, bailout_id);
+ return call;
+}
+
+
Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
BailoutId bailout_id) {
- // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
Node* variable_name = jsgraph()->Constant(variable->name());
const Operator* op =
javascript()->CallRuntime(Runtime::kThrowReferenceError, 1);
@@ -2834,7 +3118,6 @@ Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
- // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
const Operator* op =
javascript()->CallRuntime(Runtime::kThrowConstAssignError, 0);
Node* call = NewNode(op);
@@ -2902,24 +3185,6 @@ Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
}
-Node* AstGraphBuilder::BuildStackCheck() {
- IfBuilder stack_check(this);
- Node* limit =
- NewNode(jsgraph()->machine()->Load(kMachPtr),
- jsgraph()->ExternalConstant(
- ExternalReference::address_of_stack_limit(isolate())),
- jsgraph()->ZeroConstant());
- Node* stack = NewNode(jsgraph()->machine()->LoadStackPointer());
- Node* tag = NewNode(jsgraph()->machine()->UintLessThan(), limit, stack);
- stack_check.If(tag, BranchHint::kTrue);
- stack_check.Then();
- stack_check.Else();
- Node* guard = NewNode(javascript()->CallRuntime(Runtime::kStackGuard, 0));
- stack_check.End();
- return guard;
-}
-
-
bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
if (info()->osr_ast_id() == stmt->OsrEntryId()) {
info()->set_osr_expr_stack_height(std::max(
@@ -2932,11 +3197,31 @@ bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
OutputFrameStateCombine combine) {
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- DCHECK(NodeProperties::GetFrameStateInput(node)->opcode() ==
- IrOpcode::kDead);
+ if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
NodeProperties::ReplaceFrameStateInput(
- node, environment()->Checkpoint(ast_id, combine));
+ node, 0, environment()->Checkpoint(ast_id, combine));
+ }
+}
+
+
+void AstGraphBuilder::PrepareFrameStateAfterAndBefore(
+ Node* node, BailoutId ast_id, OutputFrameStateCombine combine,
+ Node* frame_state_before) {
+ if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
+ DCHECK_EQ(2, OperatorProperties::GetFrameStateInputCount(node->op()));
+
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 0)->opcode());
+ NodeProperties::ReplaceFrameStateInput(
+ node, 0, environment()->Checkpoint(ast_id, combine));
+
+ DCHECK_EQ(IrOpcode::kDead,
+ NodeProperties::GetFrameStateInput(node, 1)->opcode());
+ NodeProperties::ReplaceFrameStateInput(node, 1, frame_state_before);
}
}
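
PrepareFrameStateAfterAndBefore fills the two frame-state inputs of a node: slot 0 receives the state after the operation and slot 1 the state before it (used, per the comments earlier in this patch, e.g. for deopting from a to-number conversion inside a keyed store). A toy sketch with integers standing in for frame-state nodes:

#include <cassert>
#include <vector>

// Both slots start out as the {Dead} sentinel (here -1) and are
// overwritten with the real frame states.
struct FakeNode {
  std::vector<int> frame_states;  // sentinel -1 stands for {Dead}
};

static void PrepareAfterAndBefore(FakeNode* node, int state_after,
                                  int state_before) {
  assert(node->frame_states.size() == 2);
  assert(node->frame_states[0] == -1);  // still the sentinel
  node->frame_states[0] = state_after;
  assert(node->frame_states[1] == -1);
  node->frame_states[1] = state_before;
}

int main() {
  FakeNode node{{-1, -1}};
  PrepareAfterAndBefore(&node, /*state_after=*/10, /*state_before=*/20);
  assert(node.frame_states[0] == 10 && node.frame_states[1] == 20);
}
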
@@ -2963,7 +3248,7 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK(op->ValueInputCount() == value_input_count);
bool has_context = OperatorProperties::HasContextInput(op);
- bool has_framestate = OperatorProperties::HasFrameStateInput(op);
+ int frame_state_count = OperatorProperties::GetFrameStateInputCount(op);
bool has_control = op->ControlInputCount() == 1;
bool has_effect = op->EffectInputCount() == 1;
@@ -2971,12 +3256,13 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
DCHECK(op->EffectInputCount() < 2);
Node* result = NULL;
- if (!has_context && !has_framestate && !has_control && !has_effect) {
+ if (!has_context && frame_state_count == 0 && !has_control && !has_effect) {
result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
} else {
+ bool inside_try_scope = try_nesting_level_ > 0;
int input_count_with_deps = value_input_count;
if (has_context) ++input_count_with_deps;
- if (has_framestate) ++input_count_with_deps;
+ input_count_with_deps += frame_state_count;
if (has_control) ++input_count_with_deps;
if (has_effect) ++input_count_with_deps;
Node** buffer = EnsureInputBufferSize(input_count_with_deps);
@@ -2985,7 +3271,7 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_context) {
*current_input++ = current_context();
}
- if (has_framestate) {
+ for (int i = 0; i < frame_state_count; i++) {
// The frame state will be inserted later. Here we misuse
// the {DeadControl} node as a sentinel to be later overwritten
// with the real frame state.
@@ -3001,9 +3287,22 @@ Node* AstGraphBuilder::MakeNode(const Operator* op, int value_input_count,
if (has_effect) {
environment_->UpdateEffectDependency(result);
}
- if (result->op()->ControlOutputCount() > 0 &&
- !environment()->IsMarkedAsUnreachable()) {
- environment_->UpdateControlDependency(result);
+ if (!environment()->IsMarkedAsUnreachable()) {
+ // Update the current control dependency for control-producing nodes.
+ if (NodeProperties::IsControl(result)) {
+ environment_->UpdateControlDependency(result);
+ }
+ // Add implicit exception continuation for throwing nodes.
+ if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
+ Node* on_exception = graph()->NewNode(common()->IfException(), result);
+ environment_->UpdateControlDependency(on_exception);
+ execution_control()->ThrowValue(result);
+ }
+ // Add implicit success continuation for throwing nodes.
+ if (!result->op()->HasProperty(Operator::kNoThrow)) {
+ Node* on_success = graph()->NewNode(common()->IfSuccess(), result);
+ environment_->UpdateControlDependency(on_success);
+ }
}
}
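
MakeNode now splits control after every node that may throw: an IfSuccess projection continues straight-line control, and inside a try-block an IfException projection routes control to the handler. A sketch of the resulting node shape, using a simplified stand-in for the real Node class:

#include <cstdio>
#include <string>
#include <vector>

// A throwing node gains two control successors: IfSuccess for the
// straight-line continuation and IfException for the handler path.
struct Node {
  std::string name;
  std::vector<Node*> control_inputs;
};

int main() {
  Node call{"JSCallFunction", {}};
  Node on_success{"IfSuccess", {&call}};
  Node on_exception{"IfException", {&call}};
  std::printf("%s -> %s, %s\n", call.name.c_str(),
              on_success.name.c_str(), on_exception.name.c_str());
}
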
@@ -3034,6 +3333,7 @@ void AstGraphBuilder::Environment::Merge(Environment* other) {
if (this->IsMarkedAsUnreachable()) {
Node* other_control = other->control_dependency_;
Node* inputs[] = {other_control};
+ liveness_block_ = other->liveness_block_;
control_dependency_ =
graph()->NewNode(common()->Merge(1), arraysize(inputs), inputs, true);
effect_dependency_ = other->effect_dependency_;
@@ -3045,6 +3345,18 @@ void AstGraphBuilder::Environment::Merge(Environment* other) {
return;
}
+ // Record the merge for the local variable liveness calculation.
+ // Unfortunately, we have to mirror the logic in the MergeControl method:
+ // connect before merge or loop, or create a new merge otherwise.
+ if (FLAG_analyze_environment_liveness) {
+ if (GetControlDependency()->opcode() != IrOpcode::kLoop &&
+ GetControlDependency()->opcode() != IrOpcode::kMerge) {
+ liveness_block_ =
+ builder_->liveness_analyzer()->NewBlock(liveness_block());
+ }
+ liveness_block()->AddPredecessor(other->liveness_block());
+ }
+
// Create a merge of the control dependencies of both environments and update
// the current environment's control dependency accordingly.
Node* control = builder_->MergeControl(this->GetControlDependency(),
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index ebeb6c613c..f75207783f 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -7,6 +7,8 @@
#include "src/ast.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/liveness-analyzer.h"
+#include "src/compiler/state-values-utils.h"
namespace v8 {
namespace internal {
@@ -17,6 +19,7 @@ namespace compiler {
class ControlBuilder;
class Graph;
+class JSTypeFeedbackTable;
class LoopAssignmentAnalysis;
class LoopBuilder;
class Node;
@@ -28,10 +31,11 @@ class Node;
class AstGraphBuilder : public AstVisitor {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- LoopAssignmentAnalysis* loop_assignment = NULL);
+ LoopAssignmentAnalysis* loop_assignment = NULL,
+ JSTypeFeedbackTable* js_type_feedback = NULL);
// Creates a graph by visiting the entire AST.
- bool CreateGraph(bool constant_context);
+ bool CreateGraph(bool constant_context, bool stack_check = true);
// Helpers to create new control nodes.
Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
@@ -84,6 +88,9 @@ class AstGraphBuilder : public AstVisitor {
SetOncePointer<Node> function_closure_;
SetOncePointer<Node> function_context_;
+ // Tracks how many try-blocks are currently entered.
+ int try_nesting_level_;
+
// Temporary storage for building node input lists.
int input_buffer_size_;
Node** input_buffer_;
@@ -94,6 +101,15 @@ class AstGraphBuilder : public AstVisitor {
// Result of loop assignment analysis performed before graph creation.
LoopAssignmentAnalysis* loop_assignment_analysis_;
+ // Cache for StateValues nodes for frame states.
+ StateValuesCache state_values_cache_;
+
+ // Analyzer of local variable liveness.
+ LivenessAnalyzer liveness_analyzer_;
+
+ // Type feedback table.
+ JSTypeFeedbackTable* js_type_feedback_;
+
// Growth increment for the temporary buffer used to construct input lists to
// new nodes.
static const int kInputBufferSizeIncrement = 64;
@@ -114,6 +130,7 @@ class AstGraphBuilder : public AstVisitor {
Scope* current_scope() const;
Node* current_context() const;
Node* exit_control() const { return exit_control_; }
+ LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
void set_environment(Environment* env) { environment_ = env; }
void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
@@ -122,7 +139,7 @@ class AstGraphBuilder : public AstVisitor {
void set_exit_control(Node* exit) { exit_control_ = exit; }
// Create the main graph body by visiting the AST.
- void CreateGraphBody();
+ void CreateGraphBody(bool stack_check);
// Create the node that represents the outer context of the function.
void CreateFunctionContext(bool constant_context);
@@ -191,11 +208,39 @@ class AstGraphBuilder : public AstVisitor {
// Helper to indicate a node exits the function body.
void UpdateControlDependencyToLeaveFunction(Node* exit);
- //
+ // Builds deoptimization for a given node.
+ void PrepareFrameState(
+ Node* node, BailoutId ast_id,
+ OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore());
+ void PrepareFrameStateAfterAndBefore(Node* node, BailoutId ast_id,
+ OutputFrameStateCombine combine,
+ Node* frame_state_before);
+
+ BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
+
+ // Check if the given statement is an OSR entry.
+ // If so, record the stack height into the compilation and return {true}.
+ bool CheckOsrEntry(IterationStatement* stmt);
+
+ // Computes local variable liveness and replaces dead variables in
+ // frame states with the undefined value.
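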
+ void ClearNonLiveSlotsInFrameStates();
+
+ // Helper to wrap a Handle<T> into a Unique<T>.
+ template <class T>
+ Unique<T> MakeUnique(Handle<T> object) {
+ return Unique<T>::CreateUninitialized(object);
+ }
+
+ Node** EnsureInputBufferSize(int size);
+
+ // Named and keyed loads require a VectorSlotPair for successful lowering.
+ VectorSlotPair CreateVectorSlotPair(FeedbackVectorICSlot slot) const;
+
+ // ===========================================================================
// The following build methods all generate graph fragments and return one
// resulting node. The operand stack height remains the same, variables and
// other dependencies tracked by the environment might be mutated though.
- //
// Builder to create a receiver check for sloppy mode.
Node* BuildPatchReceiverToGlobalProxy(Node* receiver);
@@ -221,6 +266,17 @@ class AstGraphBuilder : public AstVisitor {
const VectorSlotPair& feedback,
ContextualMode mode = CONTEXTUAL);
+ // Builders for property loads and stores.
+ Node* BuildKeyedLoad(Node* receiver, Node* key,
+ const VectorSlotPair& feedback, TypeFeedbackId id);
+ Node* BuildNamedLoad(Node* receiver, Handle<Name> name,
+ const VectorSlotPair& feedback, TypeFeedbackId id,
+ ContextualMode mode = NOT_CONTEXTUAL);
+ Node* BuildKeyedStore(Node* receiver, Node* key, Node* value,
+ TypeFeedbackId id);
+ Node* BuildNamedStore(Node* receiver, Handle<Name>, Node* value,
+ TypeFeedbackId id);
+
// Builders for accessing the function context.
Node* BuildLoadBuiltinsObject();
Node* BuildLoadGlobalObject();
@@ -228,6 +284,10 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildLoadClosure();
Node* BuildLoadObjectField(Node* object, int offset);
+ // Builders for accessing external references.
+ Node* BuildLoadExternal(ExternalReference ref, MachineType type);
+ Node* BuildStoreExternal(ExternalReference ref, MachineType type, Node* val);
+
// Builders for automatic type conversion.
Node* BuildToBoolean(Node* value);
Node* BuildToName(Node* value, BailoutId bailout_id);
@@ -237,6 +297,7 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildSetHomeObject(Node* value, Node* home_object, Expression* expr);
// Builders for error reporting at runtime.
+ Node* BuildThrowError(Node* exception, BailoutId bailout_id);
Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
Node* BuildThrowConstAssignError(BailoutId bailout_id);
@@ -245,6 +306,9 @@ class AstGraphBuilder : public AstVisitor {
Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole,
BailoutId bailout_id);
+ // Builders for conditional errors.
+ Node* BuildThrowIfStaticPrototype(Node* name, BailoutId bailout_id);
+
// Builders for non-local control flow.
Node* BuildReturn(Node* return_value);
Node* BuildThrow(Node* exception_value);
@@ -252,28 +316,17 @@ class AstGraphBuilder : public AstVisitor {
// Builders for binary operations.
Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
- // Builder for stack-check guards.
- Node* BuildStackCheck();
-
- // Check if the given statement is an OSR entry.
- // If so, record the stack height into the compilation and return {true}.
- bool CheckOsrEntry(IterationStatement* stmt);
-
- // Helper to wrap a Handle<T> into a Unique<T>.
- template <class T>
- Unique<T> MakeUnique(Handle<T> object) {
- return Unique<T>::CreateUninitialized(object);
- }
-
- Node** EnsureInputBufferSize(int size);
-
- // Named and keyed loads require a VectorSlotPair for successful lowering.
- VectorSlotPair CreateVectorSlotPair(FeedbackVectorICSlot slot) const;
-
// Process arguments to a call by popping {arity} elements off the operand
// stack and build a call node using the given call operator.
Node* ProcessArguments(const Operator* op, int arity);
+ // ===========================================================================
+ // The following visitation methods all recursively visit a subtree of the
+ // underlying AST and extend the graph. The operand stack is mutated in a way
+ // consistent with other compilers:
+ // - Expressions pop operands and push result, depending on {AstContext}.
+ // - Statements keep the operand stack balanced.
+
// Visit statements.
void VisitIfNotNull(Statement* stmt);
@@ -287,7 +340,7 @@ class AstGraphBuilder : public AstVisitor {
void VisitForValues(ZoneList<Expression*>* exprs);
// Common for all IterationStatement bodies.
- void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop, int);
+ void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop);
// Dispatched from VisitCallRuntime.
void VisitCallJSRuntime(CallRuntime* expr);
@@ -311,13 +364,6 @@ class AstGraphBuilder : public AstVisitor {
// Dispatched from VisitClassLiteral.
void VisitClassLiteralContents(ClassLiteral* expr);
- // Builds deoptimization for a given node.
- void PrepareFrameState(
- Node* node, BailoutId ast_id,
- OutputFrameStateCombine combine = OutputFrameStateCombine::Ignore());
-
- BitVector* GetVariablesAssignedInLoop(IterationStatement* stmt);
-
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
};
@@ -342,26 +388,10 @@ class AstGraphBuilder::Environment : public ZoneObject {
locals_count_;
}
- // Operations on parameter or local variables. The parameter indices are
- // shifted by 1 (receiver is parameter index -1 but environment index 0).
- void Bind(Variable* variable, Node* node) {
- DCHECK(variable->IsStackAllocated());
- if (variable->IsParameter()) {
- values()->at(variable->index() + 1) = node;
- } else {
- DCHECK(variable->IsStackLocal());
- values()->at(variable->index() + parameters_count_) = node;
- }
- }
- Node* Lookup(Variable* variable) {
- DCHECK(variable->IsStackAllocated());
- if (variable->IsParameter()) {
- return values()->at(variable->index() + 1);
- } else {
- DCHECK(variable->IsStackLocal());
- return values()->at(variable->index() + parameters_count_);
- }
- }
+ // Operations on parameter or local variables.
+ void Bind(Variable* variable, Node* node);
+ Node* Lookup(Variable* variable);
+ void MarkAllLocalsLive();
Node* Context() const { return contexts_.back(); }
void PushContext(Node* context) { contexts()->push_back(context); }
@@ -397,10 +427,16 @@ class AstGraphBuilder::Environment : public ZoneObject {
DCHECK(depth >= 0 && depth <= stack_height());
values()->erase(values()->end() - depth, values()->end());
}
+ void Trim(int trim_to_height) {
+ int depth = stack_height() - trim_to_height;
+ DCHECK(depth >= 0 && depth <= stack_height());
+ values()->erase(values()->end() - depth, values()->end());
+ }
// Preserve a checkpoint of the environment for the IR graph. Any
// further mutation of the environment will not affect checkpoints.
- Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine);
+ Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine =
+ OutputFrameStateCombine::Ignore());
// Control dependency tracked by this environment.
Node* GetControlDependency() { return control_dependency_; }
@@ -438,7 +474,7 @@ class AstGraphBuilder::Environment : public ZoneObject {
// Copies this environment at a loop header control-flow point.
Environment* CopyForLoop(BitVector* assigned, bool is_osr = false) {
PrepareForLoop(assigned, is_osr);
- return Copy();
+ return CopyAndShareLiveness();
}
int ContextStackDepth() { return static_cast<int>(contexts_.size()); }
@@ -447,6 +483,7 @@ class AstGraphBuilder::Environment : public ZoneObject {
AstGraphBuilder* builder_;
int parameters_count_;
int locals_count_;
+ LivenessAnalyzerBlock* liveness_block_;
NodeVector values_;
NodeVector contexts_;
Node* control_dependency_;
@@ -455,15 +492,18 @@ class AstGraphBuilder::Environment : public ZoneObject {
Node* locals_node_;
Node* stack_node_;
- explicit Environment(const Environment* copy);
+ explicit Environment(Environment* copy);
Environment* Copy() { return new (zone()) Environment(this); }
+ Environment* CopyAndShareLiveness();
void UpdateStateValues(Node** state_values, int offset, int count);
+ void UpdateStateValuesWithCache(Node** state_values, int offset, int count);
Zone* zone() const { return builder_->local_zone(); }
Graph* graph() const { return builder_->graph(); }
AstGraphBuilder* builder() const { return builder_; }
CommonOperatorBuilder* common() { return builder_->common(); }
NodeVector* values() { return &values_; }
NodeVector* contexts() { return &contexts_; }
+ LivenessAnalyzerBlock* liveness_block() { return liveness_block_; }
// Prepare environment to be used as loop header.
void PrepareForLoop(BitVector* assigned, bool is_osr = false);
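
The Bind/Lookup bodies moved out of the header encode the environment's slot layout: the receiver sits at slot 0 (parameter index -1), parameters are shifted by one, and stack locals follow the parameter section. A sketch of that index mapping, assuming parameters_count counts the receiver slot, as the shifted indices suggest:

#include <cassert>

// Slot layout implied by the (previously inline) Bind/Lookup bodies:
//   slot 0          -> receiver (parameter index -1)
//   slots 1, 2, ... -> parameters (parameter index + 1)
//   following slots -> stack locals (local index + parameters_count)
int EnvironmentSlot(bool is_parameter, int index, int parameters_count) {
  if (is_parameter) {
    assert(index >= -1);  // -1 is the receiver
    return index + 1;
  }
  assert(index >= 0);
  return index + parameters_count;
}
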
diff --git a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
index c81d5483bf..4028b9412b 100644
--- a/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/deps/v8/src/compiler/ast-loop-assignment-analyzer.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/compiler/ast-loop-assignment-analyzer.h"
+#include "src/compiler.h"
#include "src/parser.h"
namespace v8 {
diff --git a/deps/v8/src/compiler/basic-block-instrumentor.cc b/deps/v8/src/compiler/basic-block-instrumentor.cc
index 54f9d6b059..00291bba48 100644
--- a/deps/v8/src/compiler/basic-block-instrumentor.cc
+++ b/deps/v8/src/compiler/basic-block-instrumentor.cc
@@ -55,8 +55,7 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
BasicBlockProfiler::Data* data =
info->isolate()->GetOrCreateBasicBlockProfiler()->NewData(n_blocks);
// Set the function name.
- if (!info->shared_info().is_null() &&
- info->shared_info()->name()->IsString()) {
+ if (info->has_shared_info() && info->shared_info()->name()->IsString()) {
std::ostringstream os;
String::cast(info->shared_info()->name())->PrintUC16(os);
data->SetFunctionName(&os);
diff --git a/deps/v8/src/compiler/change-lowering.cc b/deps/v8/src/compiler/change-lowering.cc
index 0057b10f81..81af22b0f0 100644
--- a/deps/v8/src/compiler/change-lowering.cc
+++ b/deps/v8/src/compiler/change-lowering.cc
@@ -5,7 +5,6 @@
#include "src/compiler/change-lowering.h"
#include "src/code-factory.h"
-#include "src/compiler/diamond.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/machine-operator.h"
@@ -47,25 +46,17 @@ Reduction ChangeLowering::Reduce(Node* node) {
Node* ChangeLowering::HeapNumberValueIndexConstant() {
- STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
- const int heap_number_value_offset =
- ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
- return jsgraph()->IntPtrConstant(heap_number_value_offset - kHeapObjectTag);
+ return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
}
Node* ChangeLowering::SmiMaxValueConstant() {
- const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
- : SmiTagging<8>::SmiValueSize();
- return jsgraph()->Int32Constant(
- -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
+ return jsgraph()->Int32Constant(Smi::kMaxValue);
}
Node* ChangeLowering::SmiShiftBitsConstant() {
- const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
- : SmiTagging<8>::SmiShiftSize();
- return jsgraph()->IntPtrConstant(smi_shift_size + kSmiTagSize);
+ return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
@@ -73,14 +64,17 @@ Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
// The AllocateHeapNumberStub does not use the context, so we can safely pass
// in Smi zero here.
Callable callable = CodeFactory::AllocateHeapNumber(isolate());
- CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
- isolate(), jsgraph()->zone(), callable.descriptor(), 0,
- CallDescriptor::kNoFlags);
Node* target = jsgraph()->HeapConstant(callable.code());
Node* context = jsgraph()->NoContextConstant();
Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
- Node* heap_number = graph()->NewNode(common()->Call(descriptor), target,
- context, effect, control);
+ if (!allocate_heap_number_operator_.is_set()) {
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+ CallDescriptor::kNoFlags, Operator::kNoThrow);
+ allocate_heap_number_operator_.set(common()->Call(descriptor));
+ }
+ Node* heap_number = graph()->NewNode(allocate_heap_number_operator_.get(),
+ target, context, effect, control);
Node* store = graph()->NewNode(
machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
@@ -93,6 +87,14 @@ Node* ChangeLowering::ChangeInt32ToFloat64(Node* value) {
}
+Node* ChangeLowering::ChangeInt32ToSmi(Node* value) {
+ if (machine()->Is64()) {
+ value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+ }
+ return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+}
+
+
Node* ChangeLowering::ChangeSmiToFloat64(Node* value) {
return ChangeInt32ToFloat64(ChangeSmiToInt32(value));
}
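
ChangeInt32ToSmi above shifts by kSmiShiftSize + kSmiTagSize, which on 64-bit targets (32-bit payload in the upper word) can never overflow; the 32-bit path in ChangeInt32ToTagged further below instead computes value + value (equal to value << 1) with Int32AddWithOverflow and falls back to HeapNumber allocation. A scalar sketch of that arithmetic; the constants assume the usual V8 Smi layouts, and the overflow check uses a GCC/Clang builtin:

#include <cstdint>
#include <optional>

constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize64 = 31;  // 64-bit V8: payload in the upper word

// 64-bit tagging: sign-extend, then shift. Every int32 fits, so the
// lowering needs no overflow diamond on 64-bit targets.
int64_t ChangeInt32ToSmi64(int32_t value) {
  return static_cast<int64_t>(value) << (kSmiShiftSize64 + kSmiTagSize);
}

// 32-bit tagging: value + value == value << 1. Signed overflow means the
// value needs more than 31 bits, i.e. the HeapNumber arm must be taken.
std::optional<int32_t> ChangeInt32ToSmi32(int32_t value) {
  int32_t tagged;
  if (__builtin_add_overflow(value, value, &tagged)) return std::nullopt;
  return tagged;
}
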
@@ -135,64 +137,80 @@ Node* ChangeLowering::TestNotSmi(Node* value) {
}
-Node* ChangeLowering::Uint32LessThanOrEqual(Node* lhs, Node* rhs) {
- return graph()->NewNode(machine()->Uint32LessThanOrEqual(), lhs, rhs);
-}
-
-
-Reduction ChangeLowering::ChangeBitToBool(Node* val, Node* control) {
- MachineType const type = static_cast<MachineType>(kTypeBool | kRepTagged);
- return Replace(graph()->NewNode(common()->Select(type), val,
+Reduction ChangeLowering::ChangeBitToBool(Node* value, Node* control) {
+ return Replace(graph()->NewNode(common()->Select(kMachAnyTagged), value,
jsgraph()->TrueConstant(),
jsgraph()->FalseConstant()));
}
-Reduction ChangeLowering::ChangeBoolToBit(Node* val) {
- return Replace(
- graph()->NewNode(machine()->WordEqual(), val, jsgraph()->TrueConstant()));
+Reduction ChangeLowering::ChangeBoolToBit(Node* value) {
+ return Replace(graph()->NewNode(machine()->WordEqual(), value,
+ jsgraph()->TrueConstant()));
}
-Reduction ChangeLowering::ChangeFloat64ToTagged(Node* val, Node* control) {
- return Replace(AllocateHeapNumberWithValue(val, control));
+Reduction ChangeLowering::ChangeFloat64ToTagged(Node* value, Node* control) {
+ return Replace(AllocateHeapNumberWithValue(value, control));
}
Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
- if (machine()->Is64()) {
- return Replace(graph()->NewNode(
- machine()->Word64Shl(),
- graph()->NewNode(machine()->ChangeInt32ToInt64(), value),
- SmiShiftBitsConstant()));
- } else if (NodeProperties::GetBounds(value).upper->Is(Type::Signed31())) {
- return Replace(
- graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant()));
+ if (machine()->Is64() ||
+ NodeProperties::GetBounds(value).upper->Is(Type::SignedSmall())) {
+ return Replace(ChangeInt32ToSmi(value));
}
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
+
Node* ovf = graph()->NewNode(common()->Projection(1), add);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue =
+ AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(common()->Projection(0), add);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge);
- Diamond d(graph(), common(), ovf, BranchHint::kFalse);
- d.Chain(control);
- return Replace(
- d.Phi(kMachAnyTagged,
- AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), d.if_true),
- graph()->NewNode(common()->Projection(0), add)));
+ return Replace(phi);
}
Reduction ChangeLowering::ChangeTaggedToUI32(Node* value, Node* control,
Signedness signedness) {
+ if (NodeProperties::GetBounds(value).upper->Is(Type::TaggedSigned())) {
+ return Replace(ChangeSmiToInt32(value));
+ }
+
const MachineType type = (signedness == kSigned) ? kMachInt32 : kMachUint32;
const Operator* op = (signedness == kSigned)
? machine()->ChangeFloat64ToInt32()
: machine()->ChangeFloat64ToUint32();
- Diamond d(graph(), common(), TestNotSmi(value), BranchHint::kFalse);
- d.Chain(control);
- return Replace(
- d.Phi(type, graph()->NewNode(op, LoadHeapNumberValue(value, d.if_true)),
- ChangeSmiToInt32(value)));
+
+ if (NodeProperties::GetBounds(value).upper->Is(Type::TaggedPointer())) {
+ return Replace(graph()->NewNode(op, LoadHeapNumberValue(value, control)));
+ }
+
+ Node* check = TestNotSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = graph()->NewNode(op, LoadHeapNumberValue(value, if_true));
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = ChangeSmiToInt32(value);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(common()->Phi(type, 2), vtrue, vfalse, merge);
+
+ return Replace(phi);
}
@@ -202,6 +220,7 @@ bool CanCover(Node* value, IrOpcode::Value opcode) {
if (value->opcode() != opcode) return false;
bool first = true;
for (Edge const edge : value->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) continue;
if (NodeProperties::IsEffectEdge(edge)) continue;
DCHECK(NodeProperties::IsValueEdge(edge));
if (!first) return false;
@@ -225,48 +244,88 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
Node* const effect = NodeProperties::GetEffectInput(value);
Node* const control = NodeProperties::GetControlInput(value);
- Diamond d1(graph(), common(), TestNotSmi(object), BranchHint::kFalse);
- d1.Chain(control);
+ const Operator* merge_op = common()->Merge(2);
+ const Operator* ephi_op = common()->EffectPhi(2);
+ const Operator* phi_op = common()->Phi(kMachFloat64, 2);
- Node* number =
- OperatorProperties::HasFrameStateInput(value->op())
+ Node* check1 = TestNotSmi(object);
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* vtrue1 =
+ FLAG_turbo_deoptimization
? graph()->NewNode(value->op(), object, context,
- NodeProperties::GetFrameStateInput(value),
- effect, d1.if_true)
- : graph()->NewNode(value->op(), object, context, effect,
- d1.if_true);
- Diamond d2(graph(), common(), TestNotSmi(number));
- d2.Nest(d1, true);
- Node* phi2 = d2.Phi(kMachFloat64, LoadHeapNumberValue(number, d2.if_true),
- ChangeSmiToFloat64(number));
-
- Node* phi1 = d1.Phi(kMachFloat64, phi2, ChangeSmiToFloat64(object));
- Node* ephi1 = d1.EffectPhi(number, effect);
-
- for (Edge edge : value->use_edges()) {
- if (NodeProperties::IsEffectEdge(edge)) {
- edge.UpdateTo(ephi1);
- }
+ NodeProperties::GetFrameStateInput(value, 0),
+ effect, if_true1)
+ : graph()->NewNode(value->op(), object, context, effect, if_true1);
+ Node* etrue1 = vtrue1;
+ {
+ Node* check2 = TestNotSmi(vtrue1);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_true1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* vtrue2 = LoadHeapNumberValue(vtrue1, if_true2);
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* vfalse2 = ChangeSmiToFloat64(vtrue1);
+
+ if_true1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ vtrue1 = graph()->NewNode(phi_op, vtrue2, vfalse2, if_true1);
}
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* vfalse1 = ChangeSmiToFloat64(object);
+ Node* efalse1 = effect;
+
+ Node* merge1 = graph()->NewNode(merge_op, if_true1, if_false1);
+ Node* ephi1 = graph()->NewNode(ephi_op, etrue1, efalse1, merge1);
+ Node* phi1 = graph()->NewNode(phi_op, vtrue1, vfalse1, merge1);
+
+ NodeProperties::ReplaceWithValue(value, phi1, ephi1, merge1);
return Replace(phi1);
}
- Diamond d(graph(), common(), TestNotSmi(value), BranchHint::kFalse);
- d.Chain(control);
- Node* load = LoadHeapNumberValue(value, d.if_true);
- Node* number = ChangeSmiToFloat64(value);
- return Replace(d.Phi(kMachFloat64, load, number));
+ Node* check = TestNotSmi(value);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = LoadHeapNumberValue(value, if_true);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = ChangeSmiToFloat64(value);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachFloat64, 2), vtrue, vfalse, merge);
+
+ return Replace(phi);
}
Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
- Diamond d(graph(), common(),
- Uint32LessThanOrEqual(value, SmiMaxValueConstant()),
- BranchHint::kTrue);
- d.Chain(control);
- return Replace(d.Phi(
- kMachAnyTagged, ChangeUint32ToSmi(value),
- AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), d.if_false)));
+ if (NodeProperties::GetBounds(value).upper->Is(Type::UnsignedSmall())) {
+ return Replace(ChangeUint32ToSmi(value));
+ }
+
+ Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
+ SmiMaxValueConstant());
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = ChangeUint32ToSmi(value);
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse =
+ AllocateHeapNumberWithValue(ChangeUint32ToFloat64(value), if_false);
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi =
+ graph()->NewNode(common()->Phi(kMachAnyTagged, 2), vtrue, vfalse, merge);
+
+ return Replace(phi);
}
diff --git a/deps/v8/src/compiler/change-lowering.h b/deps/v8/src/compiler/change-lowering.h
index 40a3e152b5..0d498ccd97 100644
--- a/deps/v8/src/compiler/change-lowering.h
+++ b/deps/v8/src/compiler/change-lowering.h
@@ -16,6 +16,7 @@ class CommonOperatorBuilder;
class JSGraph;
class Linkage;
class MachineOperatorBuilder;
+class Operator;
class ChangeLowering FINAL : public Reducer {
public:
@@ -31,13 +32,13 @@ class ChangeLowering FINAL : public Reducer {
Node* AllocateHeapNumberWithValue(Node* value, Node* control);
Node* ChangeInt32ToFloat64(Node* value);
+ Node* ChangeInt32ToSmi(Node* value);
Node* ChangeSmiToFloat64(Node* value);
Node* ChangeSmiToInt32(Node* value);
Node* ChangeUint32ToFloat64(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* LoadHeapNumberValue(Node* value, Node* control);
Node* TestNotSmi(Node* value);
- Node* Uint32LessThanOrEqual(Node* lhs, Node* rhs);
Reduction ChangeBitToBool(Node* value, Node* control);
Reduction ChangeBoolToBit(Node* value);
@@ -54,7 +55,8 @@ class ChangeLowering FINAL : public Reducer {
CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
- JSGraph* jsgraph_;
+ JSGraph* const jsgraph_;
+ SetOncePointer<const Operator> allocate_heap_number_operator_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/code-generator-impl.h b/deps/v8/src/compiler/code-generator-impl.h
index 129f9409e3..74233ac6fe 100644
--- a/deps/v8/src/compiler/code-generator-impl.h
+++ b/deps/v8/src/compiler/code-generator-impl.h
@@ -27,49 +27,53 @@ class InstructionOperandConverter {
// -- Instruction operand accesses with conversions --------------------------
- Register InputRegister(int index) {
+ Register InputRegister(size_t index) {
return ToRegister(instr_->InputAt(index));
}
- DoubleRegister InputDoubleRegister(int index) {
+ DoubleRegister InputDoubleRegister(size_t index) {
return ToDoubleRegister(instr_->InputAt(index));
}
- double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); }
+ double InputDouble(size_t index) { return ToDouble(instr_->InputAt(index)); }
- int32_t InputInt32(int index) {
+ int32_t InputInt32(size_t index) {
return ToConstant(instr_->InputAt(index)).ToInt32();
}
- int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); }
+ int8_t InputInt8(size_t index) {
+ return static_cast<int8_t>(InputInt32(index));
+ }
- int16_t InputInt16(int index) {
+ int16_t InputInt16(size_t index) {
return static_cast<int16_t>(InputInt32(index));
}
- uint8_t InputInt5(int index) {
+ uint8_t InputInt5(size_t index) {
return static_cast<uint8_t>(InputInt32(index) & 0x1F);
}
- uint8_t InputInt6(int index) {
+ uint8_t InputInt6(size_t index) {
return static_cast<uint8_t>(InputInt32(index) & 0x3F);
}
- Handle<HeapObject> InputHeapObject(int index) {
+ Handle<HeapObject> InputHeapObject(size_t index) {
return ToHeapObject(instr_->InputAt(index));
}
- Label* InputLabel(int index) { return ToLabel(instr_->InputAt(index)); }
+ Label* InputLabel(size_t index) { return ToLabel(instr_->InputAt(index)); }
- BasicBlock::RpoNumber InputRpo(int index) {
+ RpoNumber InputRpo(size_t index) {
return ToRpoNumber(instr_->InputAt(index));
}
- Register OutputRegister(int index = 0) {
+ Register OutputRegister(size_t index = 0) {
return ToRegister(instr_->OutputAt(index));
}
- Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+ Register TempRegister(size_t index) {
+ return ToRegister(instr_->TempAt(index));
+ }
DoubleRegister OutputDoubleRegister() {
return ToDoubleRegister(instr_->Output());
@@ -81,7 +85,7 @@ class InstructionOperandConverter {
return gen_->GetLabel(ToRpoNumber(op));
}
- BasicBlock::RpoNumber ToRpoNumber(InstructionOperand* op) {
+ RpoNumber ToRpoNumber(InstructionOperand* op) {
return ToConstant(op).ToRpoNumber();
}
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 1a4566d609..47dce3105c 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -37,11 +37,12 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
code_(code),
info_(info),
labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
- current_block_(BasicBlock::RpoNumber::Invalid()),
+ current_block_(RpoNumber::Invalid()),
current_source_position_(SourcePosition::Invalid()),
masm_(info->isolate(), NULL, 0),
resolver_(this),
safepoints_(code->zone()),
+ handlers_(code->zone()),
deoptimization_states_(code->zone()),
deoptimization_literals_(code->zone()),
translations_(code->zone()),
@@ -84,7 +85,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
if (FLAG_code_comments) {
// TODO(titzer): these code comments are a giant memory leak.
Vector<char> buffer = Vector<char>::New(32);
- SNPrintF(buffer, "-- B%d start --", block->id().ToInt());
+ SNPrintF(buffer, "-- B%d start --", block->rpo_number().ToInt());
masm()->RecordComment(buffer.start());
}
masm()->bind(GetLabel(current_block_));
@@ -131,6 +132,19 @@ Handle<Code> CodeGenerator::GenerateCode() {
result->set_stack_slots(frame()->GetSpillSlotCount());
result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+ // Emit exception handler table.
+ if (!handlers_.empty()) {
+ Handle<HandlerTable> table =
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForReturn(static_cast<int>(handlers_.size())),
+ TENURED));
+ for (size_t i = 0; i < handlers_.size(); ++i) {
+ table->SetReturnOffset(static_cast<int>(i), handlers_[i].pc_offset);
+ table->SetReturnHandler(static_cast<int>(i), handlers_[i].handler->pos());
+ }
+ result->set_handler_table(*table);
+ }
+
PopulateDeoptimizationData(result);
// Ensure there is space for lazy deoptimization in the relocation info.
@@ -146,7 +160,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
-bool CodeGenerator::IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const {
+bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
return code()->InstructionBlockAt(current_block_)->ao_number().IsNext(
code()->InstructionBlockAt(block)->ao_number());
}
@@ -186,10 +200,8 @@ void CodeGenerator::AssembleInstruction(Instruction* instr) {
if (mode == kFlags_branch) {
// Assemble a branch after this instruction.
InstructionOperandConverter i(this, instr);
- BasicBlock::RpoNumber true_rpo =
- i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
- BasicBlock::RpoNumber false_rpo =
- i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
+ RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
+ RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);
if (true_rpo == false_rpo) {
// redundant branch.
@@ -321,7 +333,7 @@ Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
}
-void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
+void CodeGenerator::RecordCallPosition(Instruction* instr) {
CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
@@ -330,16 +342,21 @@ void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
instr->pointer_map(), Safepoint::kSimple, 0,
needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
+ if (flags & CallDescriptor::kHasExceptionHandler) {
+ InstructionOperandConverter i(this, instr);
+ RpoNumber handler_rpo =
+ i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
+ handlers_.push_back({GetLabel(handler_rpo), masm()->pc_offset()});
+ }
+
if (flags & CallDescriptor::kNeedsNopAfterCall) {
AddNopForSmiCodeInlining();
}
if (needs_frame_state) {
MarkLazyDeoptSite();
- // If the frame state is present, it starts at argument 1
- // (just after the code address).
- InstructionOperandConverter converter(this, instr);
- // Deoptimization info starts at argument 1
+ // If the frame state is present, it starts at argument 1 (just after the
+ // code address).
size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetFrameStateDescriptor(instr, frame_state_offset);
@@ -383,8 +400,8 @@ int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
Instruction* instr, size_t frame_state_offset) {
InstructionOperandConverter i(this, instr);
- InstructionSequence::StateId state_id = InstructionSequence::StateId::FromInt(
- i.InputInt32(static_cast<int>(frame_state_offset)));
+ InstructionSequence::StateId state_id =
+ InstructionSequence::StateId::FromInt(i.InputInt32(frame_state_offset));
return code()->GetFrameStateDescriptor(state_id);
}
@@ -493,8 +510,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
InstructionOperand* op,
MachineType type) {
if (op->IsStackSlot()) {
- if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
- type == kMachInt16) {
+ // TODO(jarin) kMachBool and kRepBit should materialize true and false
+ // rather than creating an int value.
+ if (type == kMachBool || type == kRepBit || type == kMachInt32 ||
+ type == kMachInt8 || type == kMachInt16) {
translation->StoreInt32StackSlot(op->index());
} else if (type == kMachUint32 || type == kMachUint16 ||
type == kMachUint8) {
@@ -509,8 +528,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsRegister()) {
InstructionOperandConverter converter(this, instr);
- if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
- type == kMachInt16) {
+ // TODO(jarin) kMachBool and kRepBit should materialize true and false
+ // rather than creating an int value.
+ if (type == kMachBool || type == kRepBit || type == kMachInt32 ||
+ type == kMachInt8 || type == kMachInt16) {
translation->StoreInt32Register(converter.ToRegister(op));
} else if (type == kMachUint32 || type == kMachUint16 ||
type == kMachUint8) {
@@ -530,12 +551,14 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
Handle<Object> constant_object;
switch (constant.type()) {
case Constant::kInt32:
- DCHECK(type == kMachInt32 || type == kMachUint32);
+ DCHECK(type == kMachInt32 || type == kMachUint32 || type == kRepBit);
constant_object =
isolate()->factory()->NewNumberFromInt(constant.ToInt32());
break;
case Constant::kFloat64:
- DCHECK(type == kMachFloat64 || type == kMachAnyTagged);
+ DCHECK(type == kMachFloat64 || type == kMachAnyTagged ||
+ type == kRepTagged || type == (kTypeInt32 | kRepTagged) ||
+ type == (kTypeUint32 | kRepTagged));
constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
break;
case Constant::kHeapObject:
@@ -576,12 +599,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
- UNIMPLEMENTED();
-}
+void CodeGenerator::AssembleArchJump(RpoNumber target) { UNIMPLEMENTED(); }
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
UNIMPLEMENTED();
}
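
RecordCallPosition pairs every call flagged kHasExceptionHandler with the pc offset of its return address, and GenerateCode later serializes those pairs into the code object's handler table. A stand-alone sketch of that bookkeeping with simplified stand-in types (a handler is just a code offset here):

#include <cstdio>
#include <vector>

struct HandlerInfo {
  int handler_offset;  // where the handler block's code starts
  int pc_offset;       // return address of the call that may throw
};

std::vector<HandlerInfo> handlers;

// Per call instruction that carries an exception handler.
void RecordCallPosition(int handler_offset, int current_pc) {
  handlers.push_back({handler_offset, current_pc});
}

// Once at finalization: emit a table the runtime searches by return address
// during unwinding to find the matching handler entry.
void EmitHandlerTable() {
  for (const HandlerInfo& h : handlers) {
    std::printf("return pc %d -> handler %d\n", h.pc_offset, h.handler_offset);
  }
}
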
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index 658394b321..462b683cbe 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -41,7 +41,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
Isolate* isolate() const { return info_->isolate(); }
Linkage* linkage() const { return linkage_; }
- Label* GetLabel(BasicBlock::RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
+ Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
private:
MacroAssembler* masm() { return &masm_; }
@@ -52,7 +52,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
// Checks if {block} will appear directly after {current_block_} when
// assembling code, in which case, a fall-through can be used.
- bool IsNextInAssemblyOrder(BasicBlock::RpoNumber block) const;
+ bool IsNextInAssemblyOrder(RpoNumber block) const;
// Record a safepoint with the given pointer map.
void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
@@ -68,13 +68,14 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
// ===========================================================================
void AssembleArchInstruction(Instruction* instr);
- void AssembleArchJump(BasicBlock::RpoNumber target);
+ void AssembleArchJump(RpoNumber target);
void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
void AssembleArchLookupSwitch(Instruction* instr);
void AssembleArchTableSwitch(Instruction* instr);
- void AssembleDeoptimizerCall(int deoptimization_id);
+ void AssembleDeoptimizerCall(int deoptimization_id,
+ Deoptimizer::BailoutType bailout_type);
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
@@ -106,8 +107,10 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
void AssembleJumpTable(Label** targets, size_t target_count);
// ===========================================================================
- // Deoptimization table construction
- void AddSafepointAndDeopt(Instruction* instr);
+ // ================== Deoptimization table construction. =====================
+ // ===========================================================================
+
+ void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
int DefineDeoptimizationLiteral(Handle<Object> literal);
FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
@@ -126,6 +129,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
void MarkLazyDeoptSite();
// ===========================================================================
+
struct DeoptimizationState : ZoneObject {
public:
BailoutId bailout_id() const { return bailout_id_; }
@@ -143,6 +147,11 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
int pc_offset_;
};
+ struct HandlerInfo {
+ Label* handler;
+ int pc_offset;
+ };
+
friend class OutOfLineCode;
Frame* const frame_;
@@ -150,13 +159,14 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
InstructionSequence* const code_;
CompilationInfo* const info_;
Label* const labels_;
- BasicBlock::RpoNumber current_block_;
+ RpoNumber current_block_;
SourcePosition current_source_position_;
MacroAssembler masm_;
GapResolver resolver_;
SafepointTableBuilder safepoints_;
+ ZoneVector<HandlerInfo> handlers_;
ZoneDeque<DeoptimizationState*> deoptimization_states_;
- ZoneDeque<Handle<Object> > deoptimization_literals_;
+ ZoneDeque<Handle<Object>> deoptimization_literals_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
JumpTable* jump_tables_;
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
index c3cbcdefc7..b07383d21c 100644
--- a/deps/v8/src/compiler/common-operator-reducer.cc
+++ b/deps/v8/src/compiler/common-operator-reducer.cc
@@ -4,8 +4,12 @@
#include "src/compiler/common-operator-reducer.h"
+#include <algorithm>
+
#include "src/compiler/common-operator.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
namespace v8 {
namespace internal {
@@ -14,29 +18,119 @@ namespace compiler {
Reduction CommonOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kEffectPhi:
- case IrOpcode::kPhi: {
- int const input_count = node->InputCount();
- if (input_count > 1) {
- Node* const replacement = node->InputAt(0);
- for (int i = 1; i < input_count - 1; ++i) {
- if (node->InputAt(i) != replacement) return NoChange();
- }
- return Replace(replacement);
- }
+ return ReduceEffectPhi(node);
+ case IrOpcode::kPhi:
+ return ReducePhi(node);
+ case IrOpcode::kSelect:
+ return ReduceSelect(node);
+ default:
break;
+ }
+ return NoChange();
+}
+
+
+Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
+ DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
+ int const input_count = node->InputCount();
+ if (input_count > 1) {
+ Node* const replacement = node->InputAt(0);
+ for (int i = 1; i < input_count - 1; ++i) {
+ if (node->InputAt(i) != replacement) return NoChange();
}
- case IrOpcode::kSelect: {
- if (node->InputAt(1) == node->InputAt(2)) {
- return Replace(node->InputAt(1));
+ return Replace(replacement);
+ }
+ return NoChange();
+}
+
+
+Reduction CommonOperatorReducer::ReducePhi(Node* node) {
+ DCHECK_EQ(IrOpcode::kPhi, node->opcode());
+ int const input_count = node->InputCount();
+ if (input_count == 3) {
+ Node* vtrue = NodeProperties::GetValueInput(node, 0);
+ Node* vfalse = NodeProperties::GetValueInput(node, 1);
+ Node* merge = NodeProperties::GetControlInput(node);
+ Node* if_true = NodeProperties::GetControlInput(merge, 0);
+ Node* if_false = NodeProperties::GetControlInput(merge, 1);
+ if (if_true->opcode() != IrOpcode::kIfTrue) {
+ std::swap(if_true, if_false);
+ std::swap(vtrue, vfalse);
+ }
+ if (if_true->opcode() == IrOpcode::kIfTrue &&
+ if_false->opcode() == IrOpcode::kIfFalse &&
+ if_true->InputAt(0) == if_false->InputAt(0)) {
+ Node* branch = if_true->InputAt(0);
+ Node* cond = branch->InputAt(0);
+ if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+ if (cond->InputAt(0) == vtrue && cond->InputAt(1) == vfalse &&
+ machine()->HasFloat64Min()) {
+ node->set_op(machine()->Float64Min());
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->TrimInputCount(2);
+ return Changed(node);
+ } else if (cond->InputAt(0) == vfalse && cond->InputAt(1) == vtrue &&
+ machine()->HasFloat64Max()) {
+ node->set_op(machine()->Float64Max());
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->TrimInputCount(2);
+ return Changed(node);
+ }
}
- break;
}
- default:
- break;
+ }
+ if (input_count > 1) {
+ Node* const replacement = node->InputAt(0);
+ for (int i = 1; i < input_count - 1; ++i) {
+ if (node->InputAt(i) != replacement) return NoChange();
+ }
+ return Replace(replacement);
+ }
+ return NoChange();
+}
+
+
+Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
+ DCHECK_EQ(IrOpcode::kSelect, node->opcode());
+ Node* cond = NodeProperties::GetValueInput(node, 0);
+ Node* vtrue = NodeProperties::GetValueInput(node, 1);
+ Node* vfalse = NodeProperties::GetValueInput(node, 2);
+ if (vtrue == vfalse) return Replace(vtrue);
+ if (cond->opcode() == IrOpcode::kFloat64LessThan) {
+ if (cond->InputAt(0) == vtrue && cond->InputAt(1) == vfalse &&
+ machine()->HasFloat64Min()) {
+ node->set_op(machine()->Float64Min());
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->TrimInputCount(2);
+ return Changed(node);
+ } else if (cond->InputAt(0) == vfalse && cond->InputAt(1) == vtrue &&
+ machine()->HasFloat64Max()) {
+ node->set_op(machine()->Float64Max());
+ node->ReplaceInput(0, vtrue);
+ node->ReplaceInput(1, vfalse);
+ node->TrimInputCount(2);
+ return Changed(node);
+ }
}
return NoChange();
}
+
+CommonOperatorBuilder* CommonOperatorReducer::common() const {
+ return jsgraph()->common();
+}
+
+
+Graph* CommonOperatorReducer::graph() const { return jsgraph()->graph(); }
+
+
+MachineOperatorBuilder* CommonOperatorReducer::machine() const {
+ return jsgraph()->machine();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
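
The new ReducePhi/ReduceSelect cases pattern-match a compare-and-pick diamond into a single Float64Min or Float64Max machine operator when the target provides one. The scalar equivalence being exploited, as a sketch (the real reducer also gates on HasFloat64Min()/HasFloat64Max(), whose NaN and -0 behaviour must match this exact select form):

// Select(Float64LessThan(a, b), a, b) computes the minimum of a and b.
double SelectMin(double a, double b) {
  return a < b ? a : b;  // rewritten to Float64Min(a, b)
}

// Select(Float64LessThan(b, a), a, b) computes the maximum of a and b.
double SelectMax(double a, double b) {
  return b < a ? a : b;  // rewritten to Float64Max(a, b)
}
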
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
index 10543db8c6..dfcbe295bc 100644
--- a/deps/v8/src/compiler/common-operator-reducer.h
+++ b/deps/v8/src/compiler/common-operator-reducer.h
@@ -11,13 +11,32 @@ namespace v8 {
namespace internal {
namespace compiler {
+// Forward declarations.
+class CommonOperatorBuilder;
+class Graph;
+class JSGraph;
+class MachineOperatorBuilder;
+
+
// Performs strength reduction on nodes that have common operators.
class CommonOperatorReducer FINAL : public Reducer {
public:
- CommonOperatorReducer() {}
+ explicit CommonOperatorReducer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
~CommonOperatorReducer() FINAL {}
Reduction Reduce(Node* node) FINAL;
+
+ private:
+ Reduction ReduceEffectPhi(Node* node);
+ Reduction ReducePhi(Node* node);
+ Reduction ReduceSelect(Node* node);
+
+ CommonOperatorBuilder* common() const;
+ Graph* graph() const;
+ JSGraph* jsgraph() const { return jsgraph_; }
+ MachineOperatorBuilder* machine() const;
+
+ JSGraph* const jsgraph_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 8aea3df5ad..75f353c7fa 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -115,13 +115,25 @@ size_t ProjectionIndexOf(const Operator* const op) {
V(End, Operator::kKontrol, 0, 0, 1, 0, 0, 0) \
V(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
+ V(IfException, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(IfDefault, Operator::kKontrol, 0, 0, 1, 0, 0, 1) \
V(Throw, Operator::kFoldable, 1, 1, 1, 0, 0, 1) \
+ V(Deoptimize, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
V(Return, Operator::kNoThrow, 1, 1, 1, 0, 0, 1) \
V(OsrNormalEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1) \
V(OsrLoopEntry, Operator::kFoldable, 0, 1, 1, 0, 1, 1)
+#define CACHED_EFFECT_PHI_LIST(V) \
+ V(1) \
+ V(2) \
+ V(3) \
+ V(4) \
+ V(5) \
+ V(6)
+
+
#define CACHED_LOOP_LIST(V) \
V(1) \
V(2)
@@ -148,6 +160,40 @@ size_t ProjectionIndexOf(const Operator* const op) {
V(6)
+#define CACHED_PHI_LIST(V) \
+ V(kMachAnyTagged, 1) \
+ V(kMachAnyTagged, 2) \
+ V(kMachAnyTagged, 3) \
+ V(kMachAnyTagged, 4) \
+ V(kMachAnyTagged, 5) \
+ V(kMachAnyTagged, 6) \
+ V(kMachBool, 2) \
+ V(kMachFloat64, 2) \
+ V(kMachInt32, 2)
+
+
+#define CACHED_PROJECTION_LIST(V) \
+ V(0) \
+ V(1)
+
+
+#define CACHED_STATE_VALUES_LIST(V) \
+ V(0) \
+ V(1) \
+ V(2) \
+ V(3) \
+ V(4) \
+ V(5) \
+ V(6) \
+ V(7) \
+ V(8) \
+ V(10) \
+ V(11) \
+ V(12) \
+ V(13) \
+ V(14)
+
+
struct CommonOperatorGlobalCache FINAL {
#define CACHED(Name, properties, value_input_count, effect_input_count, \
control_input_count, value_output_count, effect_output_count, \
@@ -176,6 +222,19 @@ struct CommonOperatorGlobalCache FINAL {
BranchOperator<BranchHint::kTrue> kBranchTrueOperator;
BranchOperator<BranchHint::kFalse> kBranchFalseOperator;
+ template <int kEffectInputCount>
+ struct EffectPhiOperator FINAL : public Operator {
+ EffectPhiOperator()
+ : Operator( // --
+ IrOpcode::kEffectPhi, Operator::kPure, // opcode
+ "EffectPhi", // name
+ 0, kEffectInputCount, 1, 0, 1, 0) {} // counts
+ };
+#define CACHED_EFFECT_PHI(input_count) \
+ EffectPhiOperator<input_count> kEffectPhi##input_count##Operator;
+ CACHED_EFFECT_PHI_LIST(CACHED_EFFECT_PHI)
+#undef CACHED_EFFECT_PHI
+
template <size_t kInputCount>
struct LoopOperator FINAL : public Operator {
LoopOperator()
@@ -202,6 +261,20 @@ struct CommonOperatorGlobalCache FINAL {
CACHED_MERGE_LIST(CACHED_MERGE)
#undef CACHED_MERGE
+ template <MachineType kType, int kInputCount>
+ struct PhiOperator FINAL : public Operator1<MachineType> {
+ PhiOperator()
+ : Operator1<MachineType>( //--
+ IrOpcode::kPhi, Operator::kPure, // opcode
+ "Phi", // name
+ kInputCount, 0, 1, 1, 0, 0, // counts
+ kType) {} // parameter
+ };
+#define CACHED_PHI(type, input_count) \
+ PhiOperator<type, input_count> kPhi##type##input_count##Operator;
+ CACHED_PHI_LIST(CACHED_PHI)
+#undef CACHED_PHI
+
template <int kIndex>
struct ParameterOperator FINAL : public Operator1<int> {
ParameterOperator()
@@ -215,6 +288,35 @@ struct CommonOperatorGlobalCache FINAL {
ParameterOperator<index> kParameter##index##Operator;
CACHED_PARAMETER_LIST(CACHED_PARAMETER)
#undef CACHED_PARAMETER
+
+ template <size_t kIndex>
+ struct ProjectionOperator FINAL : public Operator1<size_t> {
+ ProjectionOperator()
+ : Operator1<size_t>( // --
+ IrOpcode::kProjection, // opcode
+ Operator::kPure, // flags
+ "Projection", // name
+ 1, 0, 0, 1, 0, 0, // counts,
+ kIndex) {} // parameter
+ };
+#define CACHED_PROJECTION(index) \
+ ProjectionOperator<index> kProjection##index##Operator;
+ CACHED_PROJECTION_LIST(CACHED_PROJECTION)
+#undef CACHED_PROJECTION
+
+ template <int kInputCount>
+ struct StateValuesOperator FINAL : public Operator {
+ StateValuesOperator()
+ : Operator( // --
+ IrOpcode::kStateValues, // opcode
+ Operator::kPure, // flags
+ "StateValues", // name
+ kInputCount, 0, 0, 1, 0, 0) {} // counts
+ };
+#define CACHED_STATE_VALUES(input_count) \
+ StateValuesOperator<input_count> kStateValues##input_count##Operator;
+ CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
+#undef CACHED_STATE_VALUES
};
@@ -420,22 +522,40 @@ const Operator* CommonOperatorBuilder::Select(MachineType type,
}
-const Operator* CommonOperatorBuilder::Phi(MachineType type, int arguments) {
- DCHECK(arguments > 0); // Disallow empty phis.
+const Operator* CommonOperatorBuilder::Phi(MachineType type,
+ int value_input_count) {
+ DCHECK(value_input_count > 0); // Disallow empty phis.
+#define CACHED_PHI(kType, kValueInputCount) \
+ if (kType == type && kValueInputCount == value_input_count) { \
+ return &cache_.kPhi##kType##kValueInputCount##Operator; \
+ }
+ CACHED_PHI_LIST(CACHED_PHI)
+#undef CACHED_PHI
+ // Uncached.
return new (zone()) Operator1<MachineType>( // --
IrOpcode::kPhi, Operator::kPure, // opcode
"Phi", // name
- arguments, 0, 1, 1, 0, 0, // counts
+ value_input_count, 0, 1, 1, 0, 0, // counts
type); // parameter
}
-const Operator* CommonOperatorBuilder::EffectPhi(int arguments) {
- DCHECK(arguments > 0); // Disallow empty phis.
+const Operator* CommonOperatorBuilder::EffectPhi(int effect_input_count) {
+ DCHECK(effect_input_count > 0); // Disallow empty effect phis.
+ switch (effect_input_count) {
+#define CACHED_EFFECT_PHI(input_count) \
+ case input_count: \
+ return &cache_.kEffectPhi##input_count##Operator;
+ CACHED_EFFECT_PHI_LIST(CACHED_EFFECT_PHI)
+#undef CACHED_EFFECT_PHI
+ default:
+ break;
+ }
+ // Uncached.
return new (zone()) Operator( // --
IrOpcode::kEffectPhi, Operator::kPure, // opcode
"EffectPhi", // name
- 0, arguments, 1, 0, 1, 0); // counts
+ 0, effect_input_count, 1, 0, 1, 0); // counts
}
@@ -467,6 +587,16 @@ const Operator* CommonOperatorBuilder::Finish(int arguments) {
const Operator* CommonOperatorBuilder::StateValues(int arguments) {
+ switch (arguments) {
+#define CACHED_STATE_VALUES(arguments) \
+ case arguments: \
+ return &cache_.kStateValues##arguments##Operator;
+ CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
+#undef CACHED_STATE_VALUES
+ default:
+ break;
+ }
+ // Uncached.
return new (zone()) Operator( // --
IrOpcode::kStateValues, Operator::kPure, // opcode
"StateValues", // name
@@ -474,6 +604,15 @@ const Operator* CommonOperatorBuilder::StateValues(int arguments) {
}
+const Operator* CommonOperatorBuilder::TypedStateValues(
+ const ZoneVector<MachineType>* types) {
+ return new (zone()) Operator1<const ZoneVector<MachineType>*>( // --
+ IrOpcode::kTypedStateValues, Operator::kPure, // opcode
+ "TypedStateValues", // name
+ static_cast<int>(types->size()), 0, 0, 1, 0, 0, types); // counts
+}
+
+
const Operator* CommonOperatorBuilder::FrameState(
FrameStateType type, BailoutId bailout_id,
OutputFrameStateCombine state_combine, MaybeHandle<JSFunction> jsfunction) {
@@ -493,9 +632,10 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
IrOpcode::kCall, descriptor->properties(), mnemonic,
descriptor->InputCount() + descriptor->FrameStateCount(),
Operator::ZeroIfPure(descriptor->properties()),
- Operator::ZeroIfPure(descriptor->properties()),
+ Operator::ZeroIfEliminatable(descriptor->properties()),
descriptor->ReturnCount(),
- Operator::ZeroIfPure(descriptor->properties()), 0, descriptor) {}
+ Operator::ZeroIfPure(descriptor->properties()),
+ Operator::ZeroIfNoThrow(descriptor->properties()), descriptor) {}
void PrintParameter(std::ostream& os) const OVERRIDE {
os << "[" << *parameter() << "]";
@@ -506,6 +646,16 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
const Operator* CommonOperatorBuilder::Projection(size_t index) {
+ switch (index) {
+#define CACHED_PROJECTION(index) \
+ case index: \
+ return &cache_.kProjection##index##Operator;
+ CACHED_PROJECTION_LIST(CACHED_PROJECTION)
+#undef CACHED_PROJECTION
+ default:
+ break;
+ }
+ // Uncached.
return new (zone()) Operator1<size_t>( // --
IrOpcode::kProjection, // opcode
Operator::kFoldable | Operator::kNoThrow, // flags
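
The common thread in this file: operator shapes that occur constantly (small Phi, EffectPhi, Projection and StateValues input counts) are now pre-instantiated once in a global cache, and the builder functions fall back to zone allocation only for uncached shapes. A compact C++17 sketch of the pattern with a hypothetical pared-down Operator:

// Hypothetical pared-down operator: just a mnemonic and an input count.
struct Operator {
  const char* mnemonic;
  int input_count;
};

// One static instance per cached shape, materialized by the template...
template <int kInputCount>
struct EffectPhiOperator {
  static constexpr Operator kInstance{"EffectPhi", kInputCount};
};

// ...and a builder that returns the cached instance for common counts,
// allocating (in V8, zone-allocating) only for the uncached tail.
const Operator* EffectPhi(int effect_input_count) {
  switch (effect_input_count) {
    case 1: return &EffectPhiOperator<1>::kInstance;
    case 2: return &EffectPhiOperator<2>::kInstance;
    case 3: return &EffectPhiOperator<3>::kInstance;
    // ...cases up to 6 in the real cache...
    default:
      return new Operator{"EffectPhi", effect_input_count};
  }
}
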
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index 9f2c575163..23d06eaa9b 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -172,10 +172,13 @@ class CommonOperatorBuilder FINAL : public ZoneObject {
const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
+ const Operator* IfSuccess();
+ const Operator* IfException();
const Operator* Switch(size_t control_output_count);
const Operator* IfValue(int32_t value);
const Operator* IfDefault();
const Operator* Throw();
+ const Operator* Deoptimize();
const Operator* Return();
const Operator* Start(int num_formal_parameters);
@@ -196,12 +199,13 @@ class CommonOperatorBuilder FINAL : public ZoneObject {
const Operator* HeapConstant(const Unique<HeapObject>&);
const Operator* Select(MachineType, BranchHint = BranchHint::kNone);
- const Operator* Phi(MachineType type, int arguments);
- const Operator* EffectPhi(int arguments);
+ const Operator* Phi(MachineType type, int value_input_count);
+ const Operator* EffectPhi(int effect_input_count);
const Operator* EffectSet(int arguments);
const Operator* ValueEffect(int arguments);
const Operator* Finish(int arguments);
const Operator* StateValues(int arguments);
+ const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
const Operator* FrameState(
FrameStateType type, BailoutId bailout_id,
OutputFrameStateCombine state_combine,
diff --git a/deps/v8/src/compiler/control-builders.cc b/deps/v8/src/compiler/control-builders.cc
index 2ace441e61..0e4f1683b8 100644
--- a/deps/v8/src/compiler/control-builders.cc
+++ b/deps/v8/src/compiler/control-builders.cc
@@ -179,21 +179,25 @@ void TryCatchBuilder::EndCatch() {
void TryFinallyBuilder::BeginTry() {
finally_environment_ = environment()->CopyAsUnreachable();
finally_environment_->Push(the_hole());
+ finally_environment_->Push(the_hole());
}
-void TryFinallyBuilder::LeaveTry(Node* token) {
+void TryFinallyBuilder::LeaveTry(Node* token, Node* value) {
+ environment()->Push(value);
environment()->Push(token);
finally_environment_->Merge(environment());
- environment()->Pop();
+ environment()->Drop(2);
}
-void TryFinallyBuilder::EndTry(Node* fallthrough_token) {
+void TryFinallyBuilder::EndTry(Node* fallthrough_token, Node* value) {
+ environment()->Push(value);
environment()->Push(fallthrough_token);
finally_environment_->Merge(environment());
- environment()->Pop();
+ environment()->Drop(2);
token_node_ = finally_environment_->Pop();
+ value_node_ = finally_environment_->Pop();
set_environment(finally_environment_);
}
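
TryFinallyBuilder now threads a completion value through 'finally' alongside the dispatch token, so the body can both re-dispatch on how the try block exited and preserve the in-flight result (for example the operand of a pending return). A sketch of that completion record with hypothetical scalar stand-ins for the token and value nodes:

#include <cstdio>

enum class Token { kFallThrough, kReturn, kThrow };

// Completion record saved across the 'finally' body: how the try block
// exited, plus the value that exit was carrying.
struct Completion {
  Token token;
  int value;  // stands in for the saved result/exception node
};

// After the finally body runs, control re-dispatches on the saved token,
// consuming the saved value where the exit kind carries one.
void DispatchAfterFinally(const Completion& c) {
  switch (c.token) {
    case Token::kFallThrough: std::puts("fall through"); break;
    case Token::kReturn: std::printf("return %d\n", c.value); break;
    case Token::kThrow: std::printf("rethrow %d\n", c.value); break;
  }
}
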
diff --git a/deps/v8/src/compiler/control-builders.h b/deps/v8/src/compiler/control-builders.h
index c22ee04a98..59970563fb 100644
--- a/deps/v8/src/compiler/control-builders.h
+++ b/deps/v8/src/compiler/control-builders.h
@@ -167,20 +167,25 @@ class TryFinallyBuilder FINAL : public ControlBuilder {
explicit TryFinallyBuilder(AstGraphBuilder* builder)
: ControlBuilder(builder),
finally_environment_(NULL),
- token_node_(NULL) {}
+ token_node_(NULL),
+ value_node_(NULL) {}
// Primitive control commands.
void BeginTry();
- void LeaveTry(Node* token);
- void EndTry(Node* token);
+ void LeaveTry(Node* token, Node* value);
+ void EndTry(Node* token, Node* value);
void EndFinally();
// Returns the dispatch token value inside the 'finally' body.
Node* GetDispatchTokenNode() const { return token_node_; }
+ // Returns the saved result value inside the 'finally' body.
+ Node* GetResultValueNode() const { return value_node_; }
+
private:
Environment* finally_environment_; // Environment for the 'finally' body.
Node* token_node_; // Node for token in 'finally' body.
+ Node* value_node_; // Node for value in 'finally' body.
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/control-equivalence.h b/deps/v8/src/compiler/control-equivalence.h
index db05e3e73d..354d6cfceb 100644
--- a/deps/v8/src/compiler/control-equivalence.h
+++ b/deps/v8/src/compiler/control-equivalence.h
@@ -14,6 +14,11 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
+ } while (false)
+
// Determines control dependence equivalence classes for control nodes. Any two
// nodes having the same set of control dependences land in one class. These
// classes can in turn be used to:
@@ -96,16 +101,16 @@ class ControlEquivalence : public ZoneObject {
// Called at pre-visit during DFS walk.
void VisitPre(Node* node) {
- Trace("CEQ: Pre-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+ TRACE("CEQ: Pre-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
// Dispense a new pre-order number.
SetNumber(node, NewDFSNumber());
- Trace(" Assigned DFS number is %d\n", GetNumber(node));
+ TRACE(" Assigned DFS number is %zu\n", GetNumber(node));
}
// Called at mid-visit during DFS walk.
void VisitMid(Node* node, DFSDirection direction) {
- Trace("CEQ: Mid-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+ TRACE("CEQ: Mid-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
BracketList& blist = GetBracketList(node);
// Remove brackets pointing to this node [line:19].
@@ -118,7 +123,7 @@ class ControlEquivalence : public ZoneObject {
}
// Potentially start a new equivalence class [line:37].
- BracketListTrace(blist);
+ BracketListTRACE(blist);
Bracket* recent = &blist.back();
if (recent->recent_size != blist.size()) {
recent->recent_size = blist.size();
@@ -127,12 +132,12 @@ class ControlEquivalence : public ZoneObject {
// Assign equivalence class to node.
SetClass(node, recent->recent_class);
- Trace(" Assigned class number is %d\n", GetClass(node));
+ TRACE(" Assigned class number is %zu\n", GetClass(node));
}
// Called at post-visit during DFS walk.
void VisitPost(Node* node, Node* parent_node, DFSDirection direction) {
- Trace("CEQ: Post-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+ TRACE("CEQ: Post-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
BracketList& blist = GetBracketList(node);
// Remove brackets pointing to this node [line:19].
@@ -147,7 +152,7 @@ class ControlEquivalence : public ZoneObject {
// Called when hitting a back edge in the DFS walk.
void VisitBackedge(Node* from, Node* to, DFSDirection direction) {
- Trace("CEQ: Backedge from #%d:%s to #%d:%s\n", from->id(),
+ TRACE("CEQ: Backedge from #%d:%s to #%d:%s\n", from->id(),
from->op()->mnemonic(), to->id(), to->op()->mnemonic());
// Push backedge onto the bracket list [line:25].
@@ -316,7 +321,7 @@ class ControlEquivalence : public ZoneObject {
void BracketListDelete(BracketList& blist, Node* to, DFSDirection direction) {
for (BracketList::iterator i = blist.begin(); i != blist.end(); /*nop*/) {
if (i->to == to && i->direction != direction) {
- Trace(" BList erased: {%d->%d}\n", i->from->id(), i->to->id());
+ TRACE(" BList erased: {%d->%d}\n", i->from->id(), i->to->id());
i = blist.erase(i);
} else {
++i;
@@ -324,22 +329,13 @@ class ControlEquivalence : public ZoneObject {
}
}
- void BracketListTrace(BracketList& blist) {
+ void BracketListTRACE(BracketList& blist) {
if (FLAG_trace_turbo_scheduler) {
- Trace(" BList: ");
+ TRACE(" BList: ");
for (Bracket bracket : blist) {
- Trace("{%d->%d} ", bracket.from->id(), bracket.to->id());
+ TRACE("{%d->%d} ", bracket.from->id(), bracket.to->id());
}
- Trace("\n");
- }
- }
-
- void Trace(const char* msg, ...) {
- if (FLAG_trace_turbo_scheduler) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
+ TRACE("\n");
}
}
@@ -350,6 +346,8 @@ class ControlEquivalence : public ZoneObject {
Data node_data_; // Per-node data stored as a side-table.
};
+#undef TRACE
+
} // namespace compiler
} // namespace internal
} // namespace v8
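
The TRACE macro added above replaces the old varargs Trace() member with the classic do { ... } while (false) wrapper, so a guarded trace call expands to a single statement (safe in unbraced if/else) and costs only a flag check when tracing is off. A minimal self-contained sketch of the same idiom, with a stand-in flag and std::printf instead of V8's PrintF (both assumptions for illustration):

    #include <cstdio>

    static bool FLAG_trace = false;  // stand-in for FLAG_trace_turbo_scheduler

    #define TRACE(...)                            \
      do {                                        \
        if (FLAG_trace) std::printf(__VA_ARGS__); \
      } while (false)

    int main() {
      FLAG_trace = true;
      int id = 7;
      // The do/while makes the macro a single statement, so the unbraced
      // if/else below parses as intended.
      if (id > 0)
        TRACE("visit #%d\n", id);
      else
        TRACE("skip\n");
    }

The #undef TRACE at the end of the header keeps the macro from leaking into other translation units that include it.
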
diff --git a/deps/v8/src/compiler/control-flow-optimizer.cc b/deps/v8/src/compiler/control-flow-optimizer.cc
index 1a2b4cdfd8..f074f72602 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.cc
+++ b/deps/v8/src/compiler/control-flow-optimizer.cc
@@ -46,21 +46,172 @@ void ControlFlowOptimizer::Enqueue(Node* node) {
void ControlFlowOptimizer::VisitNode(Node* node) {
- for (Node* use : node->uses()) {
- if (NodeProperties::IsControl(use)) Enqueue(use);
+ for (Edge edge : node->use_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ Enqueue(edge.from());
+ }
}
}
void ControlFlowOptimizer::VisitBranch(Node* node) {
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+ if (TryBuildSwitch(node)) return;
+ if (TryCloneBranch(node)) return;
+ VisitNode(node);
+}
+
+
+bool ControlFlowOptimizer::TryCloneBranch(Node* node) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
+
+ // This optimization is a special case of (super)block cloning. It takes an
+ // input graph as shown below and clones the Branch node for every predecessor
+ // to the Merge, essentially removing the Merge completely. This avoids
+ // materializing the bit for the Phi and may offer potential for further
+ // branch folding optimizations (i.e. because one or more inputs to the Phi
+ // is a constant). Note that there may be more Phi nodes hanging off the
+ // Merge, but we can currently only handle a certain subset of them
+ // (actually only Phi and EffectPhi nodes whose uses have either the IfTrue
+ // or IfFalse as control input).
+
+ // Control1 ... ControlN
+ // ^ ^
+ // | | Cond1 ... CondN
+ // +----+ +----+ ^ ^
+ // | | | |
+ // | | +----+ |
+ // Merge<--+ | +------------+
+ // ^ \|/
+ // | Phi
+ // | |
+ // Branch----+
+ // ^
+ // |
+ // +-----+-----+
+ // | |
+ // IfTrue IfFalse
+ // ^ ^
+ // | |
+
+ // The resulting graph (modulo the Phi and EffectPhi nodes) looks like this:
+
+ // Control1 Cond1 ... ControlN CondN
+ // ^ ^ ^ ^
+ // \ / \ /
+ // Branch ... Branch
+ // ^ ^
+ // | |
+ // +---+---+ +---+----+
+ // | | | |
+ // IfTrue IfFalse ... IfTrue IfFalse
+ // ^ ^ ^ ^
+ // | | | |
+ // +--+ +-------------+ |
+ // | | +--------------+ +--+
+ // | | | |
+ // Merge Merge
+ // ^ ^
+ // | |
+
+ Node* branch = node;
+ Node* cond = NodeProperties::GetValueInput(branch, 0);
+ if (!cond->OwnedBy(branch) || cond->opcode() != IrOpcode::kPhi) return false;
+ Node* merge = NodeProperties::GetControlInput(branch);
+ if (merge->opcode() != IrOpcode::kMerge ||
+ NodeProperties::GetControlInput(cond) != merge) {
+ return false;
+ }
+ // Grab the IfTrue/IfFalse projections of the Branch.
+ Node* control_projections[2];
+ NodeProperties::CollectControlProjections(branch, control_projections,
+ arraysize(control_projections));
+ Node* if_true = control_projections[0];
+ Node* if_false = control_projections[1];
+ DCHECK_EQ(IrOpcode::kIfTrue, if_true->opcode());
+ DCHECK_EQ(IrOpcode::kIfFalse, if_false->opcode());
+ // Check/collect other Phi/EffectPhi nodes hanging off the Merge.
+ NodeVector phis(zone());
+ for (Node* const use : merge->uses()) {
+ if (use == branch || use == cond) continue;
+ // We cannot currently deal with non-Phi/EffectPhi nodes hanging off the
+ // Merge. Ideally, we would just clone the nodes (and everything that
+ // depends on them up to some distant join point), but that requires
+ // about dominance/post-dominance.
+ if (!NodeProperties::IsPhi(use)) return false;
+ for (Edge edge : use->use_edges()) {
+ // Right now we can only handle Phi/EffectPhi nodes whose uses are
+ // directly control-dependent on either the IfTrue or the IfFalse
+ // successor, because we know exactly how to update those uses.
+ // TODO(turbofan): Generalize this to all Phi/EffectPhi nodes using
+ // dominance/post-dominance on the sea of nodes.
+ if (edge.from()->op()->ControlInputCount() != 1) return false;
+ Node* control = NodeProperties::GetControlInput(edge.from());
+ if (NodeProperties::IsPhi(edge.from())) {
+ control = NodeProperties::GetControlInput(control, edge.index());
+ }
+ if (control != if_true && control != if_false) return false;
+ }
+ phis.push_back(use);
+ }
+ BranchHint const hint = BranchHintOf(branch->op());
+ int const input_count = merge->op()->ControlInputCount();
+ DCHECK_LE(1, input_count);
+ Node** const inputs = zone()->NewArray<Node*>(2 * input_count);
+ Node** const merge_true_inputs = &inputs[0];
+ Node** const merge_false_inputs = &inputs[input_count];
+ for (int index = 0; index < input_count; ++index) {
+ Node* cond1 = NodeProperties::GetValueInput(cond, index);
+ Node* control1 = NodeProperties::GetControlInput(merge, index);
+ Node* branch1 = graph()->NewNode(common()->Branch(hint), cond1, control1);
+ merge_true_inputs[index] = graph()->NewNode(common()->IfTrue(), branch1);
+ merge_false_inputs[index] = graph()->NewNode(common()->IfFalse(), branch1);
+ Enqueue(branch1);
+ }
+ Node* const merge_true = graph()->NewNode(common()->Merge(input_count),
+ input_count, merge_true_inputs);
+ Node* const merge_false = graph()->NewNode(common()->Merge(input_count),
+ input_count, merge_false_inputs);
+ for (Node* const phi : phis) {
+ for (int index = 0; index < input_count; ++index) {
+ inputs[index] = phi->InputAt(index);
+ }
+ inputs[input_count] = merge_true;
+ Node* phi_true = graph()->NewNode(phi->op(), input_count + 1, inputs);
+ inputs[input_count] = merge_false;
+ Node* phi_false = graph()->NewNode(phi->op(), input_count + 1, inputs);
+ for (Edge edge : phi->use_edges()) {
+ Node* control = NodeProperties::GetControlInput(edge.from());
+ if (NodeProperties::IsPhi(edge.from())) {
+ control = NodeProperties::GetControlInput(control, edge.index());
+ }
+ DCHECK(control == if_true || control == if_false);
+ edge.UpdateTo((control == if_true) ? phi_true : phi_false);
+ }
+ phi->Kill();
+ }
+ // Fix up IfTrue and IfFalse and kill all dead nodes.
+ if_false->ReplaceUses(merge_false);
+ if_true->ReplaceUses(merge_true);
+ if_false->Kill();
+ if_true->Kill();
+ branch->Kill();
+ cond->Kill();
+ merge->Kill();
+ return true;
+}
+
+
+bool ControlFlowOptimizer::TryBuildSwitch(Node* node) {
+ DCHECK_EQ(IrOpcode::kBranch, node->opcode());
Node* branch = node;
+ if (BranchHintOf(branch->op()) != BranchHint::kNone) return false;
Node* cond = NodeProperties::GetValueInput(branch, 0);
- if (cond->opcode() != IrOpcode::kWord32Equal) return VisitNode(node);
+ if (cond->opcode() != IrOpcode::kWord32Equal) return false;
Int32BinopMatcher m(cond);
Node* index = m.left().node();
- if (!m.right().HasValue()) return VisitNode(node);
+ if (!m.right().HasValue()) return false;
int32_t value = m.right().Value();
ZoneSet<int32_t> values(zone());
values.insert(value);
@@ -79,6 +230,7 @@ void ControlFlowOptimizer::VisitBranch(Node* node) {
if (it == if_false->uses().end()) break;
Node* branch1 = *it++;
if (branch1->opcode() != IrOpcode::kBranch) break;
+ if (BranchHintOf(branch1->op()) != BranchHint::kNone) break;
if (it != if_false->uses().end()) break;
Node* cond1 = branch1->InputAt(0);
if (cond1->opcode() != IrOpcode::kWord32Equal) break;
@@ -90,11 +242,11 @@ void ControlFlowOptimizer::VisitBranch(Node* node) {
DCHECK_NE(value, value1);
if (branch != node) {
- branch->RemoveAllInputs();
+ branch->NullAllInputs();
if_true->ReplaceInput(0, node);
}
if_true->set_op(common()->IfValue(value));
- if_false->RemoveAllInputs();
+ if_false->NullAllInputs();
Enqueue(if_true);
branch = branch1;
@@ -108,20 +260,19 @@ void ControlFlowOptimizer::VisitBranch(Node* node) {
DCHECK_EQ(IrOpcode::kIfFalse, if_false->opcode());
if (branch == node) {
DCHECK_EQ(1u, values.size());
- Enqueue(if_true);
- Enqueue(if_false);
- } else {
- DCHECK_LT(1u, values.size());
- node->set_op(common()->Switch(values.size() + 1));
- node->ReplaceInput(0, index);
- if_true->set_op(common()->IfValue(value));
- if_true->ReplaceInput(0, node);
- Enqueue(if_true);
- if_false->set_op(common()->IfDefault());
- if_false->ReplaceInput(0, node);
- Enqueue(if_false);
- branch->RemoveAllInputs();
+ return false;
}
+ DCHECK_LT(1u, values.size());
+ node->set_op(common()->Switch(values.size() + 1));
+ node->ReplaceInput(0, index);
+ if_true->set_op(common()->IfValue(value));
+ if_true->ReplaceInput(0, node);
+ Enqueue(if_true);
+ if_false->set_op(common()->IfDefault());
+ if_false->ReplaceInput(0, node);
+ Enqueue(if_false);
+ branch->NullAllInputs();
+ return true;
}
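
In source-level terms, the effect of TryCloneBranch is roughly the following: duplicating the Branch into each predecessor of the Merge removes the Merge and the Phi that materialized the branch condition. A hand-written before/after sketch (illustrative only, not code from this patch):

    #include <cstdio>

    // Before: two control paths merge, a Phi selects the condition, and a
    // single Branch then tests the materialized boolean.
    int before(bool p, int a, int b) {
      bool cond = p ? (a != 0) : (b != 0);  // Phi of Cond1/Cond2 at the Merge
      return cond ? 1 : 0;                  // Branch on the Phi
    }

    // After: the Branch is cloned into each predecessor, so the Merge/Phi
    // disappear and each cloned branch can fold further if its condition
    // turns out to be constant.
    int after(bool p, int a, int b) {
      if (p) return (a != 0) ? 1 : 0;
      return (b != 0) ? 1 : 0;
    }

    int main() {
      std::printf("%d %d\n", before(true, 3, 0), after(true, 3, 0));
    }
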
diff --git a/deps/v8/src/compiler/control-flow-optimizer.h b/deps/v8/src/compiler/control-flow-optimizer.h
index fb96e01734..d301021715 100644
--- a/deps/v8/src/compiler/control-flow-optimizer.h
+++ b/deps/v8/src/compiler/control-flow-optimizer.h
@@ -31,6 +31,9 @@ class ControlFlowOptimizer FINAL {
void VisitNode(Node* node);
void VisitBranch(Node* node);
+ bool TryBuildSwitch(Node* node);
+ bool TryCloneBranch(Node* node);
+
CommonOperatorBuilder* common() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/control-reducer.cc b/deps/v8/src/compiler/control-reducer.cc
index d20c8dd806..8b41d19d1c 100644
--- a/deps/v8/src/compiler/control-reducer.cc
+++ b/deps/v8/src/compiler/control-reducer.cc
@@ -15,6 +15,11 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_reduction) PrintF(__VA_ARGS__); \
+ } while (false)
+
enum VisitState { kUnvisited = 0, kOnStack = 1, kRevisit = 2, kVisited = 3 };
enum Decision { kFalse, kUnknown, kTrue };
@@ -42,9 +47,6 @@ class ReachabilityMarker : public NodeMarker<uint8_t> {
};
-#define TRACE(x) \
- if (FLAG_trace_turbo_reduction) PrintF x
-
class ControlReducerImpl {
public:
ControlReducerImpl(Zone* zone, JSGraph* jsgraph,
@@ -104,41 +106,46 @@ class ControlReducerImpl {
marked.Push(start);
marked.SetReachableFromStart(start);
- // We use a stack of (Node, Node::Uses::const_iterator) pairs to avoid
+ // We use a stack of (Node, Node::UseEdges::iterator) pairs to avoid
// O(n^2) traversal.
- typedef std::pair<Node*, Node::Uses::const_iterator> FwIter;
+ typedef std::pair<Node*, Node::UseEdges::iterator> FwIter;
ZoneVector<FwIter> fw_stack(zone_);
- fw_stack.push_back(FwIter(start, start->uses().begin()));
+ fw_stack.push_back(FwIter(start, start->use_edges().begin()));
while (!fw_stack.empty()) {
Node* node = fw_stack.back().first;
- TRACE(("ControlFw: #%d:%s\n", node->id(), node->op()->mnemonic()));
+ TRACE("ControlFw: #%d:%s\n", node->id(), node->op()->mnemonic());
bool pop = true;
- while (fw_stack.back().second != node->uses().end()) {
- Node* succ = *(fw_stack.back().second);
- if (marked.IsOnStack(succ) && !marked.IsReachableFromEnd(succ)) {
- // {succ} is on stack and not reachable from end.
- Node* added = ConnectNTL(succ);
- nodes.push_back(added);
- marked.SetReachableFromEnd(added);
- AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
-
- // Reset the use iterators for the entire stack.
- for (size_t i = 0; i < fw_stack.size(); i++) {
- FwIter& iter = fw_stack[i];
- fw_stack[i] = FwIter(iter.first, iter.first->uses().begin());
+ while (fw_stack.back().second != node->use_edges().end()) {
+ Edge edge = *(fw_stack.back().second);
+ if (NodeProperties::IsControlEdge(edge) &&
+ edge.from()->op()->ControlOutputCount() > 0) {
+ // Only walk control edges to control nodes.
+ Node* succ = edge.from();
+
+ if (marked.IsOnStack(succ) && !marked.IsReachableFromEnd(succ)) {
+ // {succ} is on stack and not reachable from end.
+ Node* added = ConnectNTL(succ);
+ nodes.push_back(added);
+ marked.SetReachableFromEnd(added);
+ AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
+
+ // Reset the use iterators for the entire stack.
+ for (size_t i = 0; i < fw_stack.size(); i++) {
+ FwIter& iter = fw_stack[i];
+ fw_stack[i] = FwIter(iter.first, iter.first->use_edges().begin());
+ }
+ pop = false; // restart traversing successors of this node.
+ break;
+ }
+ if (!marked.IsReachableFromStart(succ)) {
+ // {succ} is not yet reached from start.
+ marked.Push(succ);
+ marked.SetReachableFromStart(succ);
+ fw_stack.push_back(FwIter(succ, succ->use_edges().begin()));
+ pop = false; // "recurse" into successor control node.
+ break;
}
- pop = false; // restart traversing successors of this node.
- break;
- }
- if (NodeProperties::IsControl(succ) &&
- !marked.IsReachableFromStart(succ)) {
- // {succ} is a control node and not yet reached from start.
- marked.Push(succ);
- marked.SetReachableFromStart(succ);
- fw_stack.push_back(FwIter(succ, succ->uses().begin()));
- pop = false; // "recurse" into successor control node.
- break;
}
++fw_stack.back().second;
}
@@ -155,7 +162,7 @@ class ControlReducerImpl {
// Any control nodes not reachable from start are dead, even loops.
for (size_t i = 0; i < nodes.size(); i++) {
Node* node = nodes[i];
- if (NodeProperties::IsControl(node) &&
+ if (node->op()->ControlOutputCount() > 0 &&
!marked.IsReachableFromStart(node)) {
ReplaceNode(node, dead()); // uses will be added to revisit queue.
}
@@ -165,7 +172,8 @@ class ControlReducerImpl {
// Connect {loop}, the header of a non-terminating loop, to the end node.
Node* ConnectNTL(Node* loop) {
- TRACE(("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic()));
+ TRACE("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic());
+ DCHECK_EQ(IrOpcode::kLoop, loop->opcode());
Node* always = graph()->NewNode(common_->Always());
// Mark the node as visited so that we can revisit later.
@@ -190,16 +198,14 @@ class ControlReducerImpl {
DCHECK(NodeProperties::IsControlEdge(edge));
if (edge.from() == branch) continue;
switch (edge.from()->opcode()) {
-#define CASE(Opcode) case IrOpcode::k##Opcode:
- CONTROL_OP_LIST(CASE)
-#undef CASE
- // Update all control nodes (except {branch}) pointing to the {loop}.
- edge.UpdateTo(if_true);
+ case IrOpcode::kPhi:
break;
case IrOpcode::kEffectPhi:
effects.push_back(edge.from());
break;
default:
+ // Update all control edges (except {branch}) pointing to the {loop}.
+ edge.UpdateTo(if_true);
break;
}
}
@@ -282,9 +288,9 @@ class ControlReducerImpl {
for (Edge edge : node->use_edges()) {
Node* use = edge.from();
if (!marked.IsReachableFromEnd(use)) {
- TRACE(("DeadLink: #%d:%s(%d) -> #%d:%s\n", use->id(),
- use->op()->mnemonic(), edge.index(), node->id(),
- node->op()->mnemonic()));
+ TRACE("DeadLink: #%d:%s(%d) -> #%d:%s\n", use->id(),
+ use->op()->mnemonic(), edge.index(), node->id(),
+ node->op()->mnemonic());
edge.UpdateTo(NULL);
}
}
@@ -324,7 +330,7 @@ class ControlReducerImpl {
if (node->IsDead()) return Pop(); // Node was killed while on stack.
- TRACE(("ControlReduce: #%d:%s\n", node->id(), node->op()->mnemonic()));
+ TRACE("ControlReduce: #%d:%s\n", node->id(), node->op()->mnemonic());
// Recurse on an input if necessary.
for (Node* const input : node->inputs()) {
@@ -376,7 +382,7 @@ class ControlReducerImpl {
void Revisit(Node* node) {
size_t id = static_cast<size_t>(node->id());
if (id < state_.size() && state_[id] == kVisited) {
- TRACE((" Revisit #%d:%s\n", node->id(), node->op()->mnemonic()));
+ TRACE(" Revisit #%d:%s\n", node->id(), node->op()->mnemonic());
state_[id] = kRevisit;
revisit_.push_back(node);
}
@@ -400,7 +406,7 @@ class ControlReducerImpl {
// If a node has only one control input and it is dead, replace with dead.
Node* control = NodeProperties::GetControlInput(node);
if (control->opcode() == IrOpcode::kDead) {
- TRACE(("ControlDead: #%d:%s\n", node->id(), node->op()->mnemonic()));
+ TRACE("ControlDead: #%d:%s\n", node->id(), node->op()->mnemonic());
return control;
}
}
@@ -509,10 +515,8 @@ class ControlReducerImpl {
index++;
}
- if (live > 1 && live == node->InputCount()) return node; // nothing to do.
-
- TRACE(("ReduceMerge: #%d:%s (%d live)\n", node->id(),
- node->op()->mnemonic(), live));
+ TRACE("ReduceMerge: #%d:%s (%d of %d live)\n", node->id(),
+ node->op()->mnemonic(), live, index);
if (live == 0) return dead(); // no remaining inputs.
@@ -529,15 +533,46 @@ class ControlReducerImpl {
return node->InputAt(live_index);
}
- // Edit phis in place, removing dead inputs and revisiting them.
- for (Node* const phi : phis) {
- TRACE((" PhiInMerge: #%d:%s (%d live)\n", phi->id(),
- phi->op()->mnemonic(), live));
- RemoveDeadInputs(node, phi);
- Revisit(phi);
+ DCHECK_LE(2, live);
+
+ if (live < node->InputCount()) {
+ // Edit phis in place, removing dead inputs and revisiting them.
+ for (Node* const phi : phis) {
+ TRACE(" PhiInMerge: #%d:%s (%d live)\n", phi->id(),
+ phi->op()->mnemonic(), live);
+ RemoveDeadInputs(node, phi);
+ Revisit(phi);
+ }
+ // Edit the merge in place, removing dead inputs.
+ RemoveDeadInputs(node, node);
+ }
+
+ DCHECK_EQ(live, node->InputCount());
+
+ // Check if it's an unused diamond.
+ if (live == 2 && phis.empty()) {
+ Node* node0 = node->InputAt(0);
+ Node* node1 = node->InputAt(1);
+ if (((node0->opcode() == IrOpcode::kIfTrue &&
+ node1->opcode() == IrOpcode::kIfFalse) ||
+ (node1->opcode() == IrOpcode::kIfTrue &&
+ node0->opcode() == IrOpcode::kIfFalse)) &&
+ node0->OwnedBy(node) && node1->OwnedBy(node)) {
+ Node* branch0 = NodeProperties::GetControlInput(node0);
+ Node* branch1 = NodeProperties::GetControlInput(node1);
+ if (branch0 == branch1) {
+ // It's a dead diamond, i.e. neither the IfTrue nor the IfFalse nodes
+ // have users except for the Merge and the Merge has no Phi or
+ // EffectPhi uses, so replace the Merge with the control input of the
+ // diamond.
+ TRACE(" DeadDiamond: #%d:%s #%d:%s #%d:%s\n", node0->id(),
+ node0->op()->mnemonic(), node1->id(), node1->op()->mnemonic(),
+ branch0->id(), branch0->op()->mnemonic());
+ return NodeProperties::GetControlInput(branch0);
+ }
+ }
}
- // Edit the merge in place, removing dead inputs.
- RemoveDeadInputs(node, node);
+
return node;
}
@@ -548,8 +583,8 @@ class ControlReducerImpl {
Decision result = DecideCondition(branch->InputAt(0));
if (result == kTrue) {
// fold a true branch by replacing IfTrue with the branch control.
- TRACE(("BranchReduce: #%d:%s => #%d:%s\n", branch->id(),
- branch->op()->mnemonic(), node->id(), node->op()->mnemonic()));
+ TRACE(" BranchReduce: #%d:%s => #%d:%s\n", branch->id(),
+ branch->op()->mnemonic(), node->id(), node->op()->mnemonic());
return branch->InputAt(1);
}
return result == kUnknown ? node : dead();
@@ -562,8 +597,8 @@ class ControlReducerImpl {
Decision result = DecideCondition(branch->InputAt(0));
if (result == kFalse) {
// fold a false branch by replacing IfFalse with the branch control.
- TRACE(("BranchReduce: #%d:%s => #%d:%s\n", branch->id(),
- branch->op()->mnemonic(), node->id(), node->op()->mnemonic()));
+ TRACE(" BranchReduce: #%d:%s => #%d:%s\n", branch->id(),
+ branch->op()->mnemonic(), node->id(), node->op()->mnemonic());
return branch->InputAt(1);
}
return result == kUnknown ? node : dead();
@@ -595,9 +630,8 @@ class ControlReducerImpl {
// Replace uses of {node} with {replacement} and revisit the uses.
void ReplaceNode(Node* node, Node* replacement) {
if (node == replacement) return;
- TRACE((" Replace: #%d:%s with #%d:%s\n", node->id(),
- node->op()->mnemonic(), replacement->id(),
- replacement->op()->mnemonic()));
+ TRACE(" Replace: #%d:%s with #%d:%s\n", node->id(), node->op()->mnemonic(),
+ replacement->id(), replacement->op()->mnemonic());
for (Node* const use : node->uses()) {
// Don't revisit this node if it refers to itself.
if (use != node) Revisit(use);
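
The rewritten forward walk above keeps an explicit stack of (node, use-edge iterator) pairs; because each frame remembers how far it got, every edge is consumed at most once and the traversal stays linear rather than O(n^2). A generic sketch of the idiom on a plain adjacency list (container layout is ours, not V8's):

    #include <cstdio>
    #include <utility>
    #include <vector>

    using Graph = std::vector<std::vector<int>>;  // succ[v] = successor ids

    void VisitAll(const Graph& succ, int start) {
      using Iter = std::vector<int>::const_iterator;
      std::vector<std::pair<int, Iter>> stack;  // (node, next-successor iter)
      std::vector<bool> visited(succ.size(), false);
      stack.push_back({start, succ[start].begin()});
      visited[start] = true;
      while (!stack.empty()) {
        int node = stack.back().first;
        Iter& it = stack.back().second;
        if (it == succ[node].end()) {  // every successor consumed: pop
          stack.pop_back();
          continue;
        }
        int next = *it;
        ++it;  // advance before pushing; each edge is looked at exactly once
        if (!visited[next]) {  // "recurse" into an unvisited successor
          visited[next] = true;
          stack.push_back({next, succ[next].begin()});
        }
      }
    }

    int main() {
      Graph g = {{1, 2}, {2}, {0}};  // small cyclic graph
      VisitAll(g, 0);
      std::printf("done\n");
    }
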
diff --git a/deps/v8/src/compiler/generic-algorithm.h b/deps/v8/src/compiler/generic-algorithm.h
deleted file mode 100644
index 391757ecb8..0000000000
--- a/deps/v8/src/compiler/generic-algorithm.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
-#define V8_COMPILER_GENERIC_ALGORITHM_H_
-
-#include <stack>
-#include <vector>
-
-#include "src/compiler/graph.h"
-#include "src/compiler/node.h"
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class Graph;
-class Node;
-
-// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
-// post-order. Visitation uses an explicitly allocated stack rather than the
-// execution stack to avoid stack overflow.
-class GenericGraphVisit {
- public:
- // struct Visitor {
- // void Pre(Node* current);
- // void Post(Node* current);
- // void PreEdge(Node* from, int index, Node* to);
- // void PostEdge(Node* from, int index, Node* to);
- // }
- template <class Visitor>
- static void Visit(Graph* graph, Zone* zone, Node** root_begin,
- Node** root_end, Visitor* visitor) {
- typedef typename Node::InputEdges::iterator Iterator;
- typedef std::pair<Iterator, Iterator> NodeState;
- typedef std::stack<NodeState, ZoneDeque<NodeState> > NodeStateStack;
- NodeStateStack stack((ZoneDeque<NodeState>(zone)));
- BoolVector visited(graph->NodeCount(), false, zone);
- Node* current = *root_begin;
- while (true) {
- DCHECK(current != NULL);
- const int id = current->id();
- DCHECK(id >= 0);
- DCHECK(id < graph->NodeCount()); // Must be a valid id.
- bool visit = !GetVisited(&visited, id);
- if (visit) {
- visitor->Pre(current);
- SetVisited(&visited, id);
- }
- Iterator begin(visit ? current->input_edges().begin()
- : current->input_edges().end());
- Iterator end(current->input_edges().end());
- stack.push(NodeState(begin, end));
- Node* post_order_node = current;
- while (true) {
- NodeState top = stack.top();
- if (top.first == top.second) {
- if (visit) {
- visitor->Post(post_order_node);
- SetVisited(&visited, post_order_node->id());
- }
- stack.pop();
- if (stack.empty()) {
- if (++root_begin == root_end) return;
- current = *root_begin;
- break;
- }
- post_order_node = (*stack.top().first).from();
- visit = true;
- } else {
- visitor->PreEdge((*top.first).from(), (*top.first).index(),
- (*top.first).to());
- current = (*top.first).to();
- if (!GetVisited(&visited, current->id())) break;
- }
- top = stack.top();
- visitor->PostEdge((*top.first).from(), (*top.first).index(),
- (*top.first).to());
- ++stack.top().first;
- }
- }
- }
-
- template <class Visitor>
- static void Visit(Graph* graph, Zone* zone, Node* current, Visitor* visitor) {
- Node* array[] = {current};
- Visit<Visitor>(graph, zone, &array[0], &array[1], visitor);
- }
-
- struct NullNodeVisitor {
- void Pre(Node* node) {}
- void Post(Node* node) {}
- void PreEdge(Node* from, int index, Node* to) {}
- void PostEdge(Node* from, int index, Node* to) {}
- };
-
- private:
- static void SetVisited(BoolVector* visited, int id) {
- if (id >= static_cast<int>(visited->size())) {
- // Resize and set all values to unvisited.
- visited->resize((3 * id) / 2, false);
- }
- visited->at(id) = true;
- }
-
- static bool GetVisited(BoolVector* visited, int id) {
- if (id >= static_cast<int>(visited->size())) return false;
- return visited->at(id);
- }
-};
-
-typedef GenericGraphVisit::NullNodeVisitor NullNodeVisitor;
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_GENERIC_ALGORITHM_H_
diff --git a/deps/v8/src/compiler/graph-inl.h b/deps/v8/src/compiler/graph-inl.h
deleted file mode 100644
index 3a21737c6e..0000000000
--- a/deps/v8/src/compiler/graph-inl.h
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GRAPH_INL_H_
-#define V8_COMPILER_GRAPH_INL_H_
-
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/graph.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class Visitor>
-void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
- Zone tmp_zone;
- GenericGraphVisit::Visit<Visitor>(this, &tmp_zone, end(), visitor);
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_GRAPH_INL_H_
diff --git a/deps/v8/src/compiler/graph-visualizer.cc b/deps/v8/src/compiler/graph-visualizer.cc
index 42d355fb1d..2d313f356d 100644
--- a/deps/v8/src/compiler/graph-visualizer.cc
+++ b/deps/v8/src/compiler/graph-visualizer.cc
@@ -27,9 +27,9 @@ namespace compiler {
FILE* OpenVisualizerLogFile(CompilationInfo* info, const char* phase,
const char* suffix, const char* mode) {
- EmbeddedVector<char, 256> filename;
+ EmbeddedVector<char, 256> filename(0);
SmartArrayPointer<char> function_name;
- if (!info->shared_info().is_null()) {
+ if (info->has_shared_info()) {
function_name = info->shared_info()->DebugName()->ToCString();
if (strlen(function_name.get()) > 0) {
SNPrintF(filename, "turbo-%s", function_name.get());
@@ -361,9 +361,17 @@ void GraphVisualizer::Print() {
<< " concentrate=\"true\"\n"
<< " \n";
+ // Find all nodes that are not reachable from end that use live nodes.
+ std::set<Node*> gray;
+ for (Node* const node : all_.live) {
+ for (Node* const use : node->uses()) {
+ if (!all_.IsLive(use)) gray.insert(use);
+ }
+ }
+
// Make sure all nodes have been output before writing out the edges.
for (Node* const node : all_.live) PrintNode(node, false);
- for (Node* const node : all_.gray) PrintNode(node, true);
+ for (Node* const node : gray) PrintNode(node, true);
// With all the nodes written, add the edges.
for (Node* const node : all_.live) {
@@ -398,7 +406,7 @@ class GraphC1Visualizer {
void PrintStringProperty(const char* name, const char* value);
void PrintLongProperty(const char* name, int64_t value);
void PrintIntProperty(const char* name, int value);
- void PrintBlockProperty(const char* name, BasicBlock::Id block_id);
+ void PrintBlockProperty(const char* name, int rpo_number);
void PrintNodeId(Node* n);
void PrintNode(Node* n);
void PrintInputs(Node* n);
@@ -461,10 +469,9 @@ void GraphC1Visualizer::PrintLongProperty(const char* name, int64_t value) {
}
-void GraphC1Visualizer::PrintBlockProperty(const char* name,
- BasicBlock::Id block_id) {
+void GraphC1Visualizer::PrintBlockProperty(const char* name, int rpo_number) {
PrintIndent();
- os_ << name << " \"B" << block_id << "\"\n";
+ os_ << name << " \"B" << rpo_number << "\"\n";
}
@@ -550,21 +557,21 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
for (size_t i = 0; i < rpo->size(); i++) {
BasicBlock* current = (*rpo)[i];
Tag block_tag(this, "block");
- PrintBlockProperty("name", current->id());
+ PrintBlockProperty("name", current->rpo_number());
PrintIntProperty("from_bci", -1);
PrintIntProperty("to_bci", -1);
PrintIndent();
os_ << "predecessors";
for (BasicBlock* predecessor : current->predecessors()) {
- os_ << " \"B" << predecessor->id() << "\"";
+ os_ << " \"B" << predecessor->rpo_number() << "\"";
}
os_ << "\n";
PrintIndent();
os_ << "successors";
for (BasicBlock* successor : current->successors()) {
- os_ << " \"B" << successor->id() << "\"";
+ os_ << " \"B" << successor->rpo_number() << "\"";
}
os_ << "\n";
@@ -575,13 +582,14 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
os_ << "flags\n";
if (current->dominator() != NULL) {
- PrintBlockProperty("dominator", current->dominator()->id());
+ PrintBlockProperty("dominator", current->dominator()->rpo_number());
}
PrintIntProperty("loop_depth", current->loop_depth());
const InstructionBlock* instruction_block =
- instructions->InstructionBlockAt(current->GetRpoNumber());
+ instructions->InstructionBlockAt(
+ RpoNumber::FromInt(current->rpo_number()));
if (instruction_block->code_start() >= 0) {
int first_index = instruction_block->first_instruction_index();
int last_index = instruction_block->last_instruction_index();
@@ -646,11 +654,11 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
if (current->control_input() != NULL) {
PrintNode(current->control_input());
} else {
- os_ << -1 - current->id().ToInt() << " Goto";
+ os_ << -1 - current->rpo_number() << " Goto";
}
os_ << " ->";
for (BasicBlock* successor : current->successors()) {
- os_ << " B" << successor->id();
+ os_ << " B" << successor->rpo_number();
}
if (FLAG_trace_turbo_types && current->control_input() != NULL) {
os_ << " ";
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index d20848918d..316333ba89 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -24,9 +24,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+ Operand InputOperand(size_t index, int extra = 0) {
+ return ToOperand(instr_->InputAt(index), extra);
+ }
- Immediate InputImmediate(int index) {
+ Immediate InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
@@ -75,8 +77,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Immediate(-1);
}
- static int NextOffset(int* offset) {
- int i = *offset;
+ static size_t NextOffset(size_t* offset) {
+ size_t i = *offset;
(*offset)++;
return i;
}
@@ -91,7 +93,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
return static_cast<ScaleFactor>(scale);
}
- Operand MemoryOperand(int* offset) {
+ Operand MemoryOperand(size_t* offset) {
AddressingMode mode = AddressingModeField::decode(instr_->opcode());
switch (mode) {
case kMode_MR: {
@@ -154,7 +156,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
return Operand(no_reg, 0);
}
- Operand MemoryOperand(int first_input = 0) {
+ Operand MemoryOperand(size_t first_input = 0) {
return MemoryOperand(&first_input);
}
};
@@ -162,7 +164,7 @@ class IA32OperandConverter : public InstructionOperandConverter {
namespace {
-bool HasImmediateInput(Instruction* instr, int index) {
+bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
@@ -292,7 +294,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register reg = i.InputRegister(0);
__ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchCallJSFunction: {
@@ -304,7 +306,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Assert(equal, kWrongFunctionContext);
}
__ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchJmp:
@@ -319,6 +321,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchNop:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
@@ -439,6 +447,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ror_cl(i.OutputOperand());
}
break;
+ case kIA32Lzcnt:
+ __ Lzcnt(i.OutputRegister(), i.InputOperand(0));
+ break;
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
@@ -454,6 +465,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kSSEFloat64Div:
__ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
+ case kSSEFloat64Max:
+ __ maxsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
+ case kSSEFloat64Min:
+ __ minsd(i.InputDoubleRegister(0), i.InputOperand(1));
+ break;
case kSSEFloat64Mod: {
// TODO(dcarney): alignment is wrong.
__ sub(esp, Immediate(kDoubleSize));
@@ -482,22 +499,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat64Floor: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundDown);
- break;
- }
- case kSSEFloat64Ceil: {
+ case kSSEFloat64Round: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundUp);
- break;
- }
- case kSSEFloat64RoundTruncate: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundToZero);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
case kSSECvtss2sd:
@@ -523,6 +529,29 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kSSEUint32ToFloat64:
__ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
break;
+ case kSSEFloat64ExtractLowWord32:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ mov(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kSSEFloat64ExtractHighWord32:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
+ } else {
+ __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
+ }
+ break;
+ case kSSEFloat64InsertLowWord32:
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
+ break;
+ case kSSEFloat64InsertHighWord32:
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
+ break;
+ case kSSEFloat64LoadLowWord32:
+ __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ break;
case kAVXFloat64Add: {
CpuFeatureScope avx_scope(masm(), AVX);
__ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
@@ -547,6 +576,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
i.InputOperand(1));
break;
}
+ case kAVXFloat64Max: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vmaxsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
+ case kAVXFloat64Min: {
+ CpuFeatureScope avx_scope(masm(), AVX);
+ __ vminsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
+ break;
+ }
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
@@ -554,7 +595,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32Movb: {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov_b(operand, i.InputInt8(index));
@@ -570,7 +611,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32Movw: {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov_w(operand, i.InputInt16(index));
@@ -583,7 +624,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov(operand, i.InputImmediate(index));
@@ -596,7 +637,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->HasOutput()) {
__ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movsd(operand, i.InputDoubleRegister(index));
}
@@ -605,7 +646,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movss(operand, i.InputDoubleRegister(index));
}
@@ -699,6 +740,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
break;
+ case kIA32StackCheck: {
+ ExternalReference const stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ break;
+ }
}
}
@@ -759,7 +806,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
@@ -868,9 +915,10 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
@@ -1043,6 +1091,8 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
stack_slots -= frame()->GetOsrStackSlotCount();
}
@@ -1117,7 +1167,19 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
Constant src_constant = g.ToConstant(source);
if (src_constant.type() == Constant::kHeapObject) {
Handle<HeapObject> src = src_constant.ToHeapObject();
- if (destination->IsRegister()) {
+ if (info()->IsOptimizing() && src.is_identical_to(info()->context())) {
+ // Loading the context from the frame is way cheaper than materializing
+ // the actual context heap object address.
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ mov(dst, Operand(ebp, StandardFrameConstants::kContextOffset));
+ } else {
+ DCHECK(destination->IsStackSlot());
+ Operand dst = g.ToOperand(destination);
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ pop(dst);
+ }
+ } else if (destination->IsRegister()) {
Register dst = g.ToRegister(destination);
__ LoadHeapObject(dst, src);
} else {
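
Several new cases above, e.g. kSSEFloat64Round, recover a sub-operation from the instruction word via MiscField::decode(instr->opcode()); the instruction selector packs it in with MiscField::encode. The underlying idiom is a shifted-mask bit field inside a 32-bit opcode; a stripped-down sketch (the field positions and the placeholder opcode value are invented for illustration, not V8's actual layout):

    #include <cassert>
    #include <cstdint>

    // A compile-time bit field: |kSize| bits starting at bit |kShift|.
    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    enum RoundingMode { kRoundDown = 0, kRoundUp = 1, kRoundToZero = 2 };
    using OpcodeField = BitField<int, 0, 8>;         // assumed layout
    using MiscField = BitField<RoundingMode, 8, 8>;  // assumed layout

    int main() {
      const int kSSEFloat64Round = 42;  // placeholder opcode value
      uint32_t word = OpcodeField::encode(kSSEFloat64Round) |
                      MiscField::encode(kRoundToZero);
      assert(OpcodeField::decode(word) == kSSEFloat64Round);
      assert(MiscField::decode(word) == kRoundToZero);
    }
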
diff --git a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
index ec9fd188fc..b7a9e82f6b 100644
--- a/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/ia32/instruction-codes-ia32.h
@@ -30,26 +30,34 @@ namespace compiler {
V(IA32Shr) \
V(IA32Sar) \
V(IA32Ror) \
+ V(IA32Lzcnt) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
V(SSEFloat64Mul) \
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
+ V(SSEFloat64Max) \
+ V(SSEFloat64Min) \
V(SSEFloat64Sqrt) \
- V(SSEFloat64Floor) \
- V(SSEFloat64Ceil) \
- V(SSEFloat64RoundTruncate) \
+ V(SSEFloat64Round) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
+ V(AVXFloat64Max) \
+ V(AVXFloat64Min) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
@@ -61,7 +69,8 @@ namespace compiler {
V(IA32Movsd) \
V(IA32Lea) \
V(IA32Push) \
- V(IA32StoreWriteBarrier)
+ V(IA32StoreWriteBarrier) \
+ V(IA32StackCheck)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index beec701903..5835d13ee8 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -17,10 +17,15 @@ class IA32OperandGenerator FINAL : public OperandGenerator {
: OperandGenerator(selector) {}
InstructionOperand UseByteRegister(Node* node) {
- // TODO(dcarney): relax constraint.
+ // TODO(titzer): encode byte register use constraints.
return UseFixed(node, edx);
}
+ InstructionOperand DefineAsByteRegister(Node* node) {
+ // TODO(titzer): encode byte register def constraints.
+ return DefineAsRegister(node);
+ }
+
bool CanBeImmediate(Node* node) {
switch (node->opcode()) {
case IrOpcode::kInt32Constant:
@@ -119,8 +124,8 @@ class IA32OperandGenerator FINAL : public OperandGenerator {
};
-static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
- Node* node) {
+static void VisitRRFloat64(InstructionSelector* selector,
+ InstructionCode opcode, Node* node) {
IA32OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)));
@@ -132,7 +137,6 @@ void InstructionSelector::VisitLoad(Node* node) {
MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
ArchOpcode opcode;
- // TODO(titzer): signed/unsigned small loads
switch (rep) {
case kRepFloat32:
opcode = kIA32Movss;
@@ -366,8 +370,7 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineSameAsFirst(node);
if (cont->IsSet()) {
- // TODO(turbofan): Use byte register here.
- outputs[output_count++] = g.DefineAsRegister(cont->result());
+ outputs[output_count++] = g.DefineAsByteRegister(cont->result());
}
DCHECK_NE(0u, input_count);
@@ -375,9 +378,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -462,7 +464,7 @@ void EmitLea(InstructionSelector* selector, Node* result, Node* index,
AddressingMode mode = g.GenerateMemoryOperandInputs(
index, scale, base, displacement, inputs, &input_count);
- DCHECK_NE(0, static_cast<int>(input_count));
+ DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
InstructionOperand outputs[1];
@@ -503,6 +505,12 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
IA32OperandGenerator g(this);
@@ -515,7 +523,7 @@ void InstructionSelector::VisitInt32Add(Node* node) {
AddressingMode mode = g.GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
- DCHECK_NE(0, static_cast<int>(input_count));
+ DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
InstructionOperand outputs[1];
@@ -646,6 +654,19 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
IA32OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
+ g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
if (IsSupported(AVX)) {
Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
@@ -689,27 +710,44 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+void InstructionSelector::VisitFloat64Max(Node* node) {
IA32OperandGenerator g(this);
- Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+ if (IsSupported(AVX)) {
+ Emit(kAVXFloat64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ } else {
+ Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ IA32OperandGenerator g(this);
+ if (IsSupported(AVX)) {
+ Emit(kAVXFloat64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ } else {
+ Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ }
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Floor, node);
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundDown), node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+ VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundToZero),
+ node);
}
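
The Float64Sub matcher above turns -0 - RoundDown(-0 - x) into a single kRoundUp emission, relying on the identity ceil(x) == -floor(-x), with -0 - x serving as a negation that behaves correctly for signed zeros. A quick runnable check of the identity:

    #include <cassert>
    #include <cmath>
    #include <initializer_list>

    int main() {
      for (double x : {1.25, -1.25, 2.0, -0.5}) {
        assert(std::ceil(x) == -std::floor(-x));
      }
    }
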
@@ -718,7 +756,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
IA32OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
@@ -745,6 +783,13 @@ void InstructionSelector::VisitCall(Node* node) {
Emit(kIA32Push, g.NoOutput(), value);
}
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler != nullptr) {
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -759,7 +804,7 @@ void InstructionSelector::VisitCall(Node* node) {
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(flags);
// Emit the call instruction.
InstructionOperand* first_output =
@@ -780,12 +825,10 @@ void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
IA32OperandGenerator g(selector);
if (cont->IsBranch()) {
selector->Emit(cont->Encode(opcode), g.NoOutput(), left, right,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
- // TODO(titzer): Needs byte register.
- selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
+ selector->Emit(cont->Encode(opcode), g.DefineAsByteRegister(cont->result()),
left, right);
}
}
@@ -834,6 +877,26 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWordCompare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
+ IA32OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
+ LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
+ ExternalReference js_stack_limit =
+ ExternalReference::address_of_stack_limit(selector->isolate());
+ if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kIA32StackCheck);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+ }
+ return;
+ }
+ }
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
@@ -928,64 +991,31 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
-void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
- BasicBlock** case_branches,
- int32_t* case_values, size_t case_count,
- int32_t min_value, int32_t max_value) {
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
IA32OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- InstructionOperand default_operand = g.Label(default_branch);
- // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
- // is 2^31-1, so don't assume that it's non-zero below.
- size_t value_range =
- 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
-
- // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
- // instruction.
- size_t table_space_cost = 4 + value_range;
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * case_count;
- size_t lookup_time_cost = case_count;
- if (case_count > 4 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = value_operand;
- if (min_value) {
+ if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kIA32Lea | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-min_value));
- }
- size_t input_count = 2 + value_range;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = index_operand;
- std::fill(&inputs[1], &inputs[input_count], default_operand);
- for (size_t index = 0; index < case_count; ++index) {
- size_t value = case_values[index] - min_value;
- BasicBlock* branch = case_branches[index];
- DCHECK_LE(0u, value);
- DCHECK_LT(value + 2, input_count);
- inputs[value + 2] = g.Label(branch);
+ value_operand, g.TempImmediate(-sw.min_value));
}
- Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
- return;
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
- size_t input_count = 2 + case_count * 2;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = value_operand;
- inputs[1] = default_operand;
- for (size_t index = 0; index < case_count; ++index) {
- int32_t value = case_values[index];
- BasicBlock* branch = case_branches[index];
- inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
- inputs[index * 2 + 2 + 1] = g.Label(branch);
- }
- Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
+ return EmitLookupSwitch(sw, value_operand);
}
@@ -1061,16 +1091,55 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ IA32OperandGenerator g(this);
+ Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Float64Matcher mleft(left);
+ if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
+ Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
+ return;
+ }
+ Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.Use(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ IA32OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.Use(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kWord32ShiftIsSafe;
if (CpuFeatures::IsSupported(SSE4_1)) {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
- MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kWord32ShiftIsSafe;
+ flags |= MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat64RoundTruncate;
}
- return MachineOperatorBuilder::Flag::kNoFlags;
+ return flags;
}
} // namespace compiler
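
VisitSwitch above picks between a jump table (EmitTableSwitch) and a compare chain (EmitLookupSwitch) by weighing space against time at 1:3. Extracted from the selector, the decision is roughly the predicate below (constants copied from the code above; the real code additionally requires min_value > INT32_MIN before using a table):

    #include <cstddef>
    #include <cstdio>

    // Returns true when a jump table is expected to be cheaper than a
    // compare-and-branch chain, using the 1:3 space:time weighting above.
    bool UseTableSwitch(size_t case_count, size_t value_range) {
      size_t table_space_cost = 4 + value_range;
      size_t table_time_cost = 3;
      size_t lookup_space_cost = 3 + 2 * case_count;
      size_t lookup_time_cost = case_count;
      return case_count > 4 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost;
    }

    int main() {
      std::printf("%d\n", UseTableSwitch(8, 10));    // dense: table wins
      std::printf("%d\n", UseTableSwitch(8, 1000));  // sparse: chain wins
    }
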
diff --git a/deps/v8/src/compiler/ia32/linkage-ia32.cc b/deps/v8/src/compiler/ia32/linkage-ia32.cc
index 19dbc43f20..bfe201bf6f 100644
--- a/deps/v8/src/compiler/ia32/linkage-ia32.cc
+++ b/deps/v8/src/compiler/ia32/linkage-ia32.cc
@@ -46,9 +46,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties);
+ stack_parameter_count, flags, properties,
+ return_type);
}
diff --git a/deps/v8/src/compiler/instruction-codes.h b/deps/v8/src/compiler/instruction-codes.h
index ef1e942ed4..50e04349ea 100644
--- a/deps/v8/src/compiler/instruction-codes.h
+++ b/deps/v8/src/compiler/instruction-codes.h
@@ -40,6 +40,7 @@ namespace compiler {
V(ArchLookupSwitch) \
V(ArchTableSwitch) \
V(ArchNop) \
+ V(ArchDeoptimize) \
V(ArchRet) \
V(ArchStackPointer) \
V(ArchTruncateDoubleToI) \
diff --git a/deps/v8/src/compiler/instruction-selector-impl.h b/deps/v8/src/compiler/instruction-selector-impl.h
index 90898ba947..657167ee7d 100644
--- a/deps/v8/src/compiler/instruction-selector-impl.h
+++ b/deps/v8/src/compiler/instruction-selector-impl.h
@@ -8,12 +8,24 @@
#include "src/compiler/instruction.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
+#include "src/compiler/schedule.h"
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
namespace compiler {
+// Helper struct containing data about a table or lookup switch.
+struct SwitchInfo {
+ int32_t min_value; // minimum value of {case_values}
+ int32_t max_value; // maximum value of {case_values}
+ size_t value_range; // |max_value - min_value| + 1
+ size_t case_count; // number of cases
+ int32_t* case_values; // actual case values, unsorted
+ BasicBlock** case_branches; // basic blocks corresponding to case values
+ BasicBlock* default_branch; // default branch target
+};
+
// A helper class for the instruction selector that simplifies construction of
// Operands. This class implements a base for architecture-specific helpers.
class OperandGenerator {
@@ -74,6 +86,11 @@ class OperandGenerator {
GetVReg(node)));
}
+ InstructionOperand UseUniqueSlot(Node* node) {
+ return Use(node, UnallocatedOperand(UnallocatedOperand::MUST_HAVE_SLOT,
+ GetVReg(node)));
+ }
+
// Use register or operand for the node. If a register is chosen, it won't
// alias any temporary or output registers.
InstructionOperand UseUnique(Node* node) {
@@ -142,7 +159,8 @@ class OperandGenerator {
}
InstructionOperand Label(BasicBlock* block) {
- int index = sequence()->AddImmediate(Constant(block->GetRpoNumber()));
+ int index = sequence()->AddImmediate(
+ Constant(RpoNumber::FromInt(block->rpo_number())));
return ImmediateOperand(index);
}
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index 41b957a691..028b91459d 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -10,6 +10,8 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/state-values-utils.h"
namespace v8 {
namespace internal {
@@ -61,15 +63,15 @@ void InstructionSelector::SelectInstructions() {
// Schedule the selected instructions.
for (auto const block : *blocks) {
InstructionBlock* instruction_block =
- sequence()->InstructionBlockAt(block->GetRpoNumber());
+ sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
size_t end = instruction_block->code_end();
size_t start = instruction_block->code_start();
DCHECK_LE(end, start);
- sequence()->StartBlock(block->GetRpoNumber());
+ sequence()->StartBlock(RpoNumber::FromInt(block->rpo_number()));
while (start-- > end) {
sequence()->AddInstruction(instructions_[start]);
}
- sequence()->EndBlock(block->GetRpoNumber());
+ sequence()->EndBlock(RpoNumber::FromInt(block->rpo_number()));
}
}
@@ -398,7 +400,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->descriptor->GetInputType(0)));
break;
}
- DCHECK_EQ(1, static_cast<int>(buffer->instruction_args.size()));
+ DCHECK_EQ(1u, buffer->instruction_args.size());
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
@@ -477,7 +479,7 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
// We're done with the block.
InstructionBlock* instruction_block =
- sequence()->InstructionBlockAt(block->GetRpoNumber());
+ sequence()->InstructionBlockAt(RpoNumber::FromInt(block->rpo_number()));
instruction_block->set_code_start(static_cast<int>(instructions_.size()));
instruction_block->set_code_end(current_block_end);
@@ -485,34 +487,33 @@ void InstructionSelector::VisitBlock(BasicBlock* block) {
}
-namespace {
-
-V8_INLINE void CheckNoPhis(const BasicBlock* block) {
+void InstructionSelector::VisitControl(BasicBlock* block) {
#ifdef DEBUG
- // Branch targets should not have phis.
- for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
- const Node* node = *i;
- CHECK_NE(IrOpcode::kPhi, node->opcode());
+ // SSA deconstruction requires targets of branches not to have phis.
+ // Edge split form guarantees this property, but is more strict.
+ if (block->SuccessorCount() > 1) {
+ for (BasicBlock* const successor : block->successors()) {
+ for (Node* const node : *successor) {
+ CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
+ }
+ }
}
#endif
-}
-
-} // namespace
-
-void InstructionSelector::VisitControl(BasicBlock* block) {
Node* input = block->control_input();
switch (block->control()) {
case BasicBlock::kGoto:
return VisitGoto(block->SuccessorAt(0));
+ case BasicBlock::kCall: {
+ DCHECK_EQ(IrOpcode::kCall, input->opcode());
+ BasicBlock* success = block->SuccessorAt(0);
+ BasicBlock* exception = block->SuccessorAt(1);
+ return VisitCall(input, exception), VisitGoto(success);
+ }
case BasicBlock::kBranch: {
DCHECK_EQ(IrOpcode::kBranch, input->opcode());
BasicBlock* tbranch = block->SuccessorAt(0);
BasicBlock* fbranch = block->SuccessorAt(1);
- // SSA deconstruction requires targets of branches not to have phis.
- // Edge split form guarantees this property, but is more strict.
- CheckNoPhis(tbranch);
- CheckNoPhis(fbranch);
if (tbranch == fbranch) return VisitGoto(tbranch);
// Treat special Branch(Always, IfTrue, IfFalse) as Goto(IfTrue).
Node* const condition = input->InputAt(0);
@@ -521,41 +522,48 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
case BasicBlock::kSwitch: {
DCHECK_EQ(IrOpcode::kSwitch, input->opcode());
+ SwitchInfo sw;
// Last successor must be Default.
- BasicBlock* default_branch = block->successors().back();
- DCHECK_EQ(IrOpcode::kIfDefault, default_branch->front()->opcode());
- // SSA deconstruction requires targets of branches not to have phis.
- // Edge split form guarantees this property, but is more strict.
- CheckNoPhis(default_branch);
+ sw.default_branch = block->successors().back();
+ DCHECK_EQ(IrOpcode::kIfDefault, sw.default_branch->front()->opcode());
// All other successors must be cases.
- size_t case_count = block->SuccessorCount() - 1;
- DCHECK_LE(1u, case_count);
- BasicBlock** case_branches = &block->successors().front();
+ sw.case_count = block->SuccessorCount() - 1;
+ DCHECK_LE(1u, sw.case_count);
+ sw.case_branches = &block->successors().front();
// Determine case values and their min/max.
- int32_t* case_values = zone()->NewArray<int32_t>(case_count);
- int32_t min_value = std::numeric_limits<int32_t>::max();
- int32_t max_value = std::numeric_limits<int32_t>::min();
- for (size_t index = 0; index < case_count; ++index) {
- BasicBlock* branch = case_branches[index];
+ sw.case_values = zone()->NewArray<int32_t>(sw.case_count);
+ sw.min_value = std::numeric_limits<int32_t>::max();
+ sw.max_value = std::numeric_limits<int32_t>::min();
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ BasicBlock* branch = sw.case_branches[index];
int32_t value = OpParameter<int32_t>(branch->front()->op());
- case_values[index] = value;
- if (min_value > value) min_value = value;
- if (max_value < value) max_value = value;
- // SSA deconstruction requires targets of branches not to have phis.
- // Edge split form guarantees this property, but is more strict.
- CheckNoPhis(branch);
+ sw.case_values[index] = value;
+ if (sw.min_value > value) sw.min_value = value;
+ if (sw.max_value < value) sw.max_value = value;
}
- DCHECK_LE(min_value, max_value);
- return VisitSwitch(input, default_branch, case_branches, case_values,
- case_count, min_value, max_value);
+ DCHECK_LE(sw.min_value, sw.max_value);
+ // Note that {value_range} can be 0 if {min_value} is -2^31 and
+ // {max_value} is 2^31-1, so don't assume that it's non-zero below.
+ sw.value_range = 1u + bit_cast<uint32_t>(sw.max_value) -
+ bit_cast<uint32_t>(sw.min_value);
+ return VisitSwitch(input, sw);
}
case BasicBlock::kReturn: {
// If the result itself is a return, return its input.
- Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
+ Node* value = (input != nullptr && input->opcode() == IrOpcode::kReturn)
? input->InputAt(0)
: input;
return VisitReturn(value);
}
+ case BasicBlock::kDeoptimize: {
+ // If the result itself is a deoptimize, deoptimize its input.
+ Node* value =
+ (input != nullptr && input->opcode() == IrOpcode::kDeoptimize)
+ ? input->InputAt(0)
+ : input;
+ return VisitDeoptimize(value);
+ }
case BasicBlock::kThrow:
DCHECK_EQ(IrOpcode::kThrow, input->opcode());
return VisitThrow(input->InputAt(0));
@@ -571,140 +579,6 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
}
-MachineType InstructionSelector::GetMachineType(Node* node) {
- DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
- switch (node->opcode()) {
- case IrOpcode::kStart:
- case IrOpcode::kLoop:
- case IrOpcode::kEnd:
- case IrOpcode::kBranch:
- case IrOpcode::kIfTrue:
- case IrOpcode::kIfFalse:
- case IrOpcode::kSwitch:
- case IrOpcode::kIfValue:
- case IrOpcode::kIfDefault:
- case IrOpcode::kEffectPhi:
- case IrOpcode::kEffectSet:
- case IrOpcode::kMerge:
- // No code needed for these graph artifacts.
- return kMachNone;
- case IrOpcode::kFinish:
- return kMachAnyTagged;
- case IrOpcode::kParameter:
- return linkage()->GetParameterType(OpParameter<int>(node));
- case IrOpcode::kOsrValue:
- return kMachAnyTagged;
- case IrOpcode::kPhi:
- return OpParameter<MachineType>(node);
- case IrOpcode::kProjection:
- // TODO(jarin) Really project from outputs.
- return kMachAnyTagged;
- case IrOpcode::kInt32Constant:
- return kMachInt32;
- case IrOpcode::kInt64Constant:
- return kMachInt64;
- case IrOpcode::kExternalConstant:
- return kMachPtr;
- case IrOpcode::kFloat64Constant:
- return kMachFloat64;
- case IrOpcode::kHeapConstant:
- case IrOpcode::kNumberConstant:
- return kMachAnyTagged;
- case IrOpcode::kCall:
- return kMachAnyTagged;
- case IrOpcode::kFrameState:
- case IrOpcode::kStateValues:
- return kMachNone;
- case IrOpcode::kLoad:
- return OpParameter<LoadRepresentation>(node);
- case IrOpcode::kStore:
- return kMachNone;
- case IrOpcode::kCheckedLoad:
- return OpParameter<MachineType>(node);
- case IrOpcode::kCheckedStore:
- return kMachNone;
- case IrOpcode::kWord32And:
- case IrOpcode::kWord32Or:
- case IrOpcode::kWord32Xor:
- case IrOpcode::kWord32Shl:
- case IrOpcode::kWord32Shr:
- case IrOpcode::kWord32Sar:
- case IrOpcode::kWord32Ror:
- return kMachInt32;
- case IrOpcode::kWord32Equal:
- return kMachBool;
- case IrOpcode::kWord64And:
- case IrOpcode::kWord64Or:
- case IrOpcode::kWord64Xor:
- case IrOpcode::kWord64Shl:
- case IrOpcode::kWord64Shr:
- case IrOpcode::kWord64Sar:
- case IrOpcode::kWord64Ror:
- return kMachInt64;
- case IrOpcode::kWord64Equal:
- return kMachBool;
- case IrOpcode::kInt32Add:
- case IrOpcode::kInt32AddWithOverflow:
- case IrOpcode::kInt32Sub:
- case IrOpcode::kInt32SubWithOverflow:
- case IrOpcode::kInt32Mul:
- case IrOpcode::kInt32Div:
- case IrOpcode::kInt32Mod:
- return kMachInt32;
- case IrOpcode::kInt32LessThan:
- case IrOpcode::kInt32LessThanOrEqual:
- case IrOpcode::kUint32LessThan:
- case IrOpcode::kUint32LessThanOrEqual:
- return kMachBool;
- case IrOpcode::kInt64Add:
- case IrOpcode::kInt64Sub:
- case IrOpcode::kInt64Mul:
- case IrOpcode::kInt64Div:
- case IrOpcode::kInt64Mod:
- return kMachInt64;
- case IrOpcode::kInt64LessThan:
- case IrOpcode::kInt64LessThanOrEqual:
- return kMachBool;
- case IrOpcode::kChangeFloat32ToFloat64:
- case IrOpcode::kChangeInt32ToFloat64:
- case IrOpcode::kChangeUint32ToFloat64:
- return kMachFloat64;
- case IrOpcode::kChangeFloat64ToInt32:
- return kMachInt32;
- case IrOpcode::kChangeFloat64ToUint32:
- return kMachUint32;
- case IrOpcode::kChangeInt32ToInt64:
- return kMachInt64;
- case IrOpcode::kChangeUint32ToUint64:
- return kMachUint64;
- case IrOpcode::kTruncateFloat64ToFloat32:
- return kMachFloat32;
- case IrOpcode::kTruncateFloat64ToInt32:
- case IrOpcode::kTruncateInt64ToInt32:
- return kMachInt32;
- case IrOpcode::kFloat64Add:
- case IrOpcode::kFloat64Sub:
- case IrOpcode::kFloat64Mul:
- case IrOpcode::kFloat64Div:
- case IrOpcode::kFloat64Mod:
- case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
- case IrOpcode::kFloat64RoundTruncate:
- case IrOpcode::kFloat64RoundTiesAway:
- return kMachFloat64;
- case IrOpcode::kFloat64Equal:
- case IrOpcode::kFloat64LessThan:
- case IrOpcode::kFloat64LessThanOrEqual:
- return kMachBool;
- default:
- V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
- node->opcode(), node->op()->mnemonic(), node->id());
- }
- return kMachNone;
-}
-
-
void InstructionSelector::VisitNode(Node* node) {
DCHECK_NOT_NULL(schedule()->block(node)); // should only use scheduled nodes.
SourcePosition source_position = source_positions_->GetSourcePosition(node);
@@ -721,6 +595,8 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kBranch:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kIfSuccess:
+ case IrOpcode::kIfException:
case IrOpcode::kSwitch:
case IrOpcode::kIfValue:
case IrOpcode::kIfDefault:
@@ -760,7 +636,7 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitConstant(node);
}
case IrOpcode::kCall:
- return VisitCall(node);
+ return VisitCall(node, nullptr);
case IrOpcode::kFrameState:
case IrOpcode::kStateValues:
return;
@@ -787,6 +663,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
return VisitWord32Equal(node);
+ case IrOpcode::kWord32Clz:
+ return VisitWord32Clz(node);
case IrOpcode::kWord64And:
return VisitWord64And(node);
case IrOpcode::kWord64Or:
@@ -883,6 +761,10 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsDouble(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
return MarkAsDouble(node), VisitFloat64Mod(node);
+ case IrOpcode::kFloat64Min:
+ return MarkAsDouble(node), VisitFloat64Min(node);
+ case IrOpcode::kFloat64Max:
+ return MarkAsDouble(node), VisitFloat64Max(node);
case IrOpcode::kFloat64Sqrt:
return MarkAsDouble(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Equal:
@@ -891,14 +773,20 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitFloat64LessThan(node);
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
- case IrOpcode::kFloat64Floor:
- return MarkAsDouble(node), VisitFloat64Floor(node);
- case IrOpcode::kFloat64Ceil:
- return MarkAsDouble(node), VisitFloat64Ceil(node);
+ case IrOpcode::kFloat64RoundDown:
+ return MarkAsDouble(node), VisitFloat64RoundDown(node);
case IrOpcode::kFloat64RoundTruncate:
return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
+ case IrOpcode::kFloat64ExtractLowWord32:
+ return VisitFloat64ExtractLowWord32(node);
+ case IrOpcode::kFloat64ExtractHighWord32:
+ return VisitFloat64ExtractHighWord32(node);
+ case IrOpcode::kFloat64InsertLowWord32:
+ return MarkAsDouble(node), VisitFloat64InsertLowWord32(node);
+ case IrOpcode::kFloat64InsertHighWord32:
+ return MarkAsDouble(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
case IrOpcode::kCheckedLoad: {
@@ -930,6 +818,43 @@ void InstructionSelector::VisitLoadStackPointer(Node* node) {
Emit(kArchStackPointer, g.DefineAsRegister(node));
}
+
+void InstructionSelector::EmitTableSwitch(const SwitchInfo& sw,
+ InstructionOperand& index_operand) {
+ OperandGenerator g(this);
+ size_t input_count = 2 + sw.value_range;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = index_operand;
+ InstructionOperand default_operand = g.Label(sw.default_branch);
+ std::fill(&inputs[1], &inputs[input_count], default_operand);
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ size_t value = sw.case_values[index] - sw.min_value;
+ BasicBlock* branch = sw.case_branches[index];
+ DCHECK_LE(0u, value);
+ DCHECK_LT(value + 2, input_count);
+ inputs[value + 2] = g.Label(branch);
+ }
+ Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
+}
+
+
+void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
+ InstructionOperand& value_operand) {
+ OperandGenerator g(this);
+ size_t input_count = 2 + sw.case_count * 2;
+ auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
+ inputs[0] = value_operand;
+ inputs[1] = g.Label(sw.default_branch);
+ for (size_t index = 0; index < sw.case_count; ++index) {
+ int32_t value = sw.case_values[index];
+ BasicBlock* branch = sw.case_branches[index];
+ inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
+ inputs[index * 2 + 2 + 1] = g.Label(branch);
+ }
+ Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
+}
+
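Editor's note: a worked example of the operand layouts built by the two emitters above (derived directly from the loops; block names hypothetical):

// switch on x with cases {1 -> B1, 3 -> B2}, default -> B0:
//   EmitTableSwitch  (min_value = 1, value_range = 3, input_count = 5):
//     inputs = [ index, L(B0), L(B1), L(B0), L(B2) ]
//     case value v lands in slot v - min_value + 2; holes keep the default.
//   EmitLookupSwitch (case_count = 2, input_count = 6):
//     inputs = [ value, L(B0), #1, L(B1), #3, L(B2) ]
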
+
#endif // V8_TURBOFAN_BACKEND
// 32 bit targets do not implement the following instructions.
@@ -1037,7 +962,9 @@ void InstructionSelector::VisitPhi(Node* node) {
PhiInstruction* phi = new (instruction_zone())
PhiInstruction(instruction_zone(), GetVirtualRegister(node),
static_cast<size_t>(input_count));
- sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
+ sequence()
+ ->InstructionBlockAt(RpoNumber::FromInt(current_block_->rpo_number()))
+ ->AddPhi(phi);
for (int i = 0; i < input_count; ++i) {
Node* const input = node->InputAt(i);
MarkAsUsed(input);
@@ -1076,7 +1003,7 @@ void InstructionSelector::VisitConstant(Node* node) {
void InstructionSelector::VisitGoto(BasicBlock* target) {
// Jump to the next block.
OperandGenerator g(this);
- Emit(kArchJmp, g.NoOutput(), g.Label(target))->MarkAsControl();
+ Emit(kArchJmp, g.NoOutput(), g.Label(target));
}
@@ -1092,20 +1019,32 @@ void InstructionSelector::VisitReturn(Node* value) {
}
-void InstructionSelector::VisitThrow(Node* value) {
+void InstructionSelector::VisitDeoptimize(Node* value) {
+ DCHECK(FLAG_turbo_deoptimization);
+
OperandGenerator g(this);
- Emit(kArchNop, g.NoOutput()); // TODO(titzer)
+
+ FrameStateDescriptor* desc = GetFrameStateDescriptor(value);
+ size_t arg_count = desc->GetTotalSize() + 1; // Include deopt id.
+
+ InstructionOperandVector args(instruction_zone());
+ args.reserve(arg_count);
+
+ InstructionSequence::StateId state_id =
+ sequence()->AddFrameStateDescriptor(desc);
+ args.push_back(g.TempImmediate(state_id.ToInt()));
+
+ AddFrameStateInputs(value, &args, desc);
+
+ DCHECK_EQ(args.size(), arg_count);
+
+ Emit(kArchDeoptimize, 0, nullptr, arg_count, &args.front(), 0, nullptr);
}
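
Editor's note: the argument vector assembled here has a fixed shape, restated as a comment sketch:

// args = [ TempImmediate(state_id),   // deoptimization id, always first
//          <frame state inputs> ]     // appended by AddFrameStateInputs
// arg_count == desc->GetTotalSize() + 1, matching the DCHECK above.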
-void InstructionSelector::FillTypeVectorFromStateValues(
- ZoneVector<MachineType>* types, Node* state_values) {
- DCHECK(state_values->opcode() == IrOpcode::kStateValues);
- int count = state_values->InputCount();
- types->reserve(static_cast<size_t>(count));
- for (int i = 0; i < count; i++) {
- types->push_back(GetMachineType(state_values->InputAt(i)));
- }
+void InstructionSelector::VisitThrow(Node* value) {
+ OperandGenerator g(this);
+ Emit(kArchNop, g.NoOutput()); // TODO(titzer)
}
@@ -1113,14 +1052,15 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
Node* state) {
DCHECK(state->opcode() == IrOpcode::kFrameState);
DCHECK_EQ(5, state->InputCount());
- DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(0)->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(1)->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, state->InputAt(2)->opcode());
+ DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(0)->opcode());
+ DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(1)->opcode());
+ DCHECK_EQ(IrOpcode::kTypedStateValues, state->InputAt(2)->opcode());
FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
- int parameters = state->InputAt(0)->InputCount();
- int locals = state->InputAt(1)->InputCount();
- int stack = state->InputAt(2)->InputCount();
+ int parameters =
+ static_cast<int>(StateValuesAccess(state->InputAt(0)).size());
+ int locals = static_cast<int>(StateValuesAccess(state->InputAt(1)).size());
+ int stack = static_cast<int>(StateValuesAccess(state->InputAt(2)).size());
FrameStateDescriptor* outer_state = NULL;
Node* outer_node = state->InputAt(4);
@@ -1133,7 +1073,7 @@ FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
}
-static InstructionOperand UseOrImmediate(OperandGenerator* g, Node* input) {
+static InstructionOperand SlotOrImmediate(OperandGenerator* g, Node* input) {
switch (input->opcode()) {
case IrOpcode::kInt32Constant:
case IrOpcode::kNumberConstant:
@@ -1141,7 +1081,7 @@ static InstructionOperand UseOrImmediate(OperandGenerator* g, Node* input) {
case IrOpcode::kHeapConstant:
return g->UseImmediate(input);
default:
- return g->UseUnique(input);
+ return g->UseUniqueSlot(input);
}
}
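
Editor's note: the rename from UseOrImmediate to SlotOrImmediate follows the new MUST_HAVE_SLOT policy introduced in this patch; a hedged reading of the change, restated as a comment:

// Frame-state inputs after this patch:
//   constants        -> UseImmediate   (rematerializable at deopt time)
//   everything else  -> UseUniqueSlot  (MUST_HAVE_SLOT: a stable stack
//                                       location the deoptimizer can read)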
@@ -1160,38 +1100,36 @@ void InstructionSelector::AddFrameStateInputs(
Node* stack = state->InputAt(2);
Node* context = state->InputAt(3);
- DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
- DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());
+ DCHECK_EQ(IrOpcode::kTypedStateValues, parameters->op()->opcode());
+ DCHECK_EQ(IrOpcode::kTypedStateValues, locals->op()->opcode());
+ DCHECK_EQ(IrOpcode::kTypedStateValues, stack->op()->opcode());
- DCHECK_EQ(static_cast<int>(descriptor->parameters_count()),
- parameters->InputCount());
- DCHECK_EQ(static_cast<int>(descriptor->locals_count()), locals->InputCount());
- DCHECK_EQ(static_cast<int>(descriptor->stack_count()), stack->InputCount());
+ DCHECK_EQ(descriptor->parameters_count(),
+ StateValuesAccess(parameters).size());
+ DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
+ DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
ZoneVector<MachineType> types(instruction_zone());
types.reserve(descriptor->GetSize());
OperandGenerator g(this);
size_t value_index = 0;
- for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
- Node* input_node = parameters->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
+ for (StateValuesAccess::TypedNode input_node :
+ StateValuesAccess(parameters)) {
+ inputs->push_back(SlotOrImmediate(&g, input_node.node));
+ descriptor->SetType(value_index++, input_node.type);
}
if (descriptor->HasContext()) {
- inputs->push_back(UseOrImmediate(&g, context));
+ inputs->push_back(SlotOrImmediate(&g, context));
descriptor->SetType(value_index++, kMachAnyTagged);
}
- for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
- Node* input_node = locals->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(locals)) {
+ inputs->push_back(SlotOrImmediate(&g, input_node.node));
+ descriptor->SetType(value_index++, input_node.type);
}
- for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
- Node* input_node = stack->InputAt(i);
- inputs->push_back(UseOrImmediate(&g, input_node));
- descriptor->SetType(value_index++, GetMachineType(input_node));
+ for (StateValuesAccess::TypedNode input_node : StateValuesAccess(stack)) {
+ inputs->push_back(SlotOrImmediate(&g, input_node.node));
+ descriptor->SetType(value_index++, input_node.type);
}
DCHECK(value_index == descriptor->GetSize());
}
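
Editor's note: all three loops above share one pattern over the new StateValuesAccess helper. A minimal sketch of that pattern, using only the members this diff itself exercises (iteration yielding TypedNode with .node and .type) and a hypothetical CollectTyped name:

// Hypothetical helper condensing the repeated loop above.
static void CollectTyped(Node* state_values, OperandGenerator* g,
                         InstructionOperandVector* inputs,
                         FrameStateDescriptor* descriptor,
                         size_t* value_index) {
  for (StateValuesAccess::TypedNode entry : StateValuesAccess(state_values)) {
    inputs->push_back(SlotOrImmediate(g, entry.node));
    descriptor->SetType((*value_index)++, entry.type);
  }
}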
@@ -1205,7 +1143,9 @@ MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
#undef DECLARE_UNIMPLEMENTED_SELECTOR
-void InstructionSelector::VisitCall(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
+ UNIMPLEMENTED();
+}
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
@@ -1214,10 +1154,7 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
-void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
- BasicBlock** case_branches,
- int32_t* case_values, size_t case_count,
- int32_t min_value, int32_t max_value) {
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/compiler/instruction-selector.h b/deps/v8/src/compiler/instruction-selector.h
index 5c31db74e9..fcf205a8d9 100644
--- a/deps/v8/src/compiler/instruction-selector.h
+++ b/deps/v8/src/compiler/instruction-selector.h
@@ -18,10 +18,11 @@ namespace internal {
namespace compiler {
// Forward declarations.
+class BasicBlock;
struct CallBuffer; // TODO(bmeurer): Remove this.
class FlagsContinuation;
class Linkage;
-
+struct SwitchInfo;
typedef ZoneVector<InstructionOperand> InstructionOperandVector;
@@ -129,9 +130,15 @@ class InstructionSelector FINAL {
int GetVirtualRegister(const Node* node);
const std::map<NodeId, int> GetVirtualRegistersForTesting() const;
+ Isolate* isolate() const { return sequence()->isolate(); }
+
private:
friend class OperandGenerator;
+ void EmitTableSwitch(const SwitchInfo& sw, InstructionOperand& index_operand);
+ void EmitLookupSwitch(const SwitchInfo& sw,
+ InstructionOperand& value_operand);
+
// Inform the instruction selection that {node} was just defined.
void MarkAsDefined(Node* node);
@@ -169,11 +176,8 @@ class InstructionSelector FINAL {
bool call_address_immediate);
FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
- void FillTypeVectorFromStateValues(ZoneVector<MachineType>* parameters,
- Node* state_values);
void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
FrameStateDescriptor* descriptor);
- MachineType GetMachineType(Node* node);
// ===========================================================================
// ============= Architecture-specific graph covering methods. ===============
@@ -199,15 +203,13 @@ class InstructionSelector FINAL {
void VisitPhi(Node* node);
void VisitProjection(Node* node);
void VisitConstant(Node* node);
- void VisitCall(Node* call);
+ void VisitCall(Node* call, BasicBlock* handler);
void VisitGoto(BasicBlock* target);
void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
- void VisitSwitch(Node* node, BasicBlock* default_branch,
- BasicBlock** case_branches, int32_t* case_values,
- size_t case_count, int32_t min_value, int32_t max_value);
+ void VisitSwitch(Node* node, const SwitchInfo& sw);
+ void VisitDeoptimize(Node* value);
void VisitReturn(Node* value);
void VisitThrow(Node* value);
- void VisitDeoptimize(Node* deopt);
// ===========================================================================
diff --git a/deps/v8/src/compiler/instruction.cc b/deps/v8/src/compiler/instruction.cc
index ebd8125848..8446a0dced 100644
--- a/deps/v8/src/compiler/instruction.cc
+++ b/deps/v8/src/compiler/instruction.cc
@@ -5,6 +5,7 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/instruction.h"
+#include "src/compiler/schedule.h"
namespace v8 {
namespace internal {
@@ -32,6 +33,8 @@ std::ostream& operator<<(std::ostream& os,
unalloc->fixed_register_index()) << ")";
case UnallocatedOperand::MUST_HAVE_REGISTER:
return os << "(R)";
+ case UnallocatedOperand::MUST_HAVE_SLOT:
+ return os << "(S)";
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
return os << "(1)";
case UnallocatedOperand::ANY:
@@ -81,11 +84,32 @@ bool ParallelMove::IsRedundant() const {
}
+MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
+ auto move_ops = move_operands();
+ MoveOperands* replacement = nullptr;
+ MoveOperands* to_eliminate = nullptr;
+ for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
+ if (curr->IsEliminated()) continue;
+ if (curr->destination()->Equals(move->source())) {
+ DCHECK(!replacement);
+ replacement = curr;
+ if (to_eliminate != nullptr) break;
+ } else if (curr->destination()->Equals(move->destination())) {
+ DCHECK(!to_eliminate);
+ to_eliminate = curr;
+ if (replacement != nullptr) break;
+ }
+ }
+ DCHECK_IMPLIES(replacement == to_eliminate, replacement == nullptr);
+ if (replacement != nullptr) move->set_source(replacement->source());
+ return to_eliminate;
+}
+
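Editor's note: a worked example of PrepareInsertAfter, following the two Equals checks above (register names hypothetical):

// Existing parallel move:   r1 <- r0 ; r2 <- r3
// Insert (r4 <- r1):  replacement = (r1 <- r0), so the move becomes
//                     r4 <- r0; to_eliminate = nullptr.
// Insert (r2 <- r1):  replacement rewrites it to r2 <- r0, and
//                     to_eliminate = (r2 <- r3) is returned, since the
//                     incoming move overwrites its destination.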
+
Instruction::Instruction(InstructionCode opcode)
: opcode_(opcode),
bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
- TempCountField::encode(0) | IsCallField::encode(false) |
- IsControlField::encode(false)),
+ TempCountField::encode(0) | IsCallField::encode(false)),
pointer_map_(NULL) {}
@@ -97,7 +121,7 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
bit_field_(OutputCountField::encode(output_count) |
InputCountField::encode(input_count) |
TempCountField::encode(temp_count) |
- IsCallField::encode(false) | IsControlField::encode(false)),
+ IsCallField::encode(false)),
pointer_map_(NULL) {
size_t offset = 0;
for (size_t i = 0; i < output_count; ++i) {
@@ -314,6 +338,9 @@ std::ostream& operator<<(std::ostream& os,
}
+Constant::Constant(int32_t v) : type_(kInt32), value_(v) {}
+
+
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:
@@ -353,15 +380,12 @@ void PhiInstruction::SetInput(size_t offset, int virtual_register) {
}
-InstructionBlock::InstructionBlock(Zone* zone, BasicBlock::Id id,
- BasicBlock::RpoNumber rpo_number,
- BasicBlock::RpoNumber loop_header,
- BasicBlock::RpoNumber loop_end,
+InstructionBlock::InstructionBlock(Zone* zone, RpoNumber rpo_number,
+ RpoNumber loop_header, RpoNumber loop_end,
bool deferred)
: successors_(zone),
predecessors_(zone),
phis_(zone),
- id_(id),
ao_number_(rpo_number),
rpo_number_(rpo_number),
loop_header_(loop_header),
@@ -371,8 +395,7 @@ InstructionBlock::InstructionBlock(Zone* zone, BasicBlock::Id id,
deferred_(deferred) {}
-size_t InstructionBlock::PredecessorIndexOf(
- BasicBlock::RpoNumber rpo_number) const {
+size_t InstructionBlock::PredecessorIndexOf(RpoNumber rpo_number) const {
size_t j = 0;
for (InstructionBlock::Predecessors::const_iterator i = predecessors_.begin();
i != predecessors_.end(); ++i, ++j) {
@@ -382,31 +405,31 @@ size_t InstructionBlock::PredecessorIndexOf(
}
-static BasicBlock::RpoNumber GetRpo(BasicBlock* block) {
- if (block == NULL) return BasicBlock::RpoNumber::Invalid();
- return block->GetRpoNumber();
+static RpoNumber GetRpo(const BasicBlock* block) {
+ if (block == NULL) return RpoNumber::Invalid();
+ return RpoNumber::FromInt(block->rpo_number());
}
-static BasicBlock::RpoNumber GetLoopEndRpo(const BasicBlock* block) {
- if (!block->IsLoopHeader()) return BasicBlock::RpoNumber::Invalid();
- return block->loop_end()->GetRpoNumber();
+static RpoNumber GetLoopEndRpo(const BasicBlock* block) {
+ if (!block->IsLoopHeader()) return RpoNumber::Invalid();
+ return RpoNumber::FromInt(block->loop_end()->rpo_number());
}
static InstructionBlock* InstructionBlockFor(Zone* zone,
const BasicBlock* block) {
- InstructionBlock* instr_block = new (zone) InstructionBlock(
- zone, block->id(), block->GetRpoNumber(), GetRpo(block->loop_header()),
- GetLoopEndRpo(block), block->deferred());
+ InstructionBlock* instr_block = new (zone)
+ InstructionBlock(zone, GetRpo(block), GetRpo(block->loop_header()),
+ GetLoopEndRpo(block), block->deferred());
// Map successors and predecessors.
instr_block->successors().reserve(block->SuccessorCount());
for (BasicBlock* successor : block->successors()) {
- instr_block->successors().push_back(successor->GetRpoNumber());
+ instr_block->successors().push_back(GetRpo(successor));
}
instr_block->predecessors().reserve(block->PredecessorCount());
for (BasicBlock* predecessor : block->predecessors()) {
- instr_block->predecessors().push_back(predecessor->GetRpoNumber());
+ instr_block->predecessors().push_back(GetRpo(predecessor));
}
return instr_block;
}
@@ -421,7 +444,7 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
DCHECK(!(*blocks)[rpo_number]);
- DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number);
+ DCHECK(GetRpo(*it).ToSize() == rpo_number);
(*blocks)[rpo_number] = InstructionBlockFor(zone, *it);
}
ComputeAssemblyOrder(blocks);
@@ -433,12 +456,12 @@ void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
int ao = 0;
for (auto const block : *blocks) {
if (!block->IsDeferred()) {
- block->set_ao_number(BasicBlock::RpoNumber::FromInt(ao++));
+ block->set_ao_number(RpoNumber::FromInt(ao++));
}
}
for (auto const block : *blocks) {
if (block->IsDeferred()) {
- block->set_ao_number(BasicBlock::RpoNumber::FromInt(ao++));
+ block->set_ao_number(RpoNumber::FromInt(ao++));
}
}
}
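
Editor's note: the two passes above number all non-deferred blocks before any deferred block; for example:

// RPO order:  B0, B1 (deferred), B2, B3 (deferred)
// ao_number:  B0 = 0, B2 = 1, B1 = 2, B3 = 3
// Deferred code is thus moved out of the hot straight-line path.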
@@ -471,14 +494,13 @@ int InstructionSequence::NextVirtualRegister() {
}
-GapInstruction* InstructionSequence::GetBlockStart(
- BasicBlock::RpoNumber rpo) const {
+GapInstruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
const InstructionBlock* block = InstructionBlockAt(rpo);
return GapInstruction::cast(InstructionAt(block->code_start()));
}
-void InstructionSequence::StartBlock(BasicBlock::RpoNumber rpo) {
+void InstructionSequence::StartBlock(RpoNumber rpo) {
DCHECK(block_starts_.size() == rpo.ToSize());
InstructionBlock* block = InstructionBlockAt(rpo);
int code_start = static_cast<int>(instructions_.size());
@@ -487,7 +509,7 @@ void InstructionSequence::StartBlock(BasicBlock::RpoNumber rpo) {
}
-void InstructionSequence::EndBlock(BasicBlock::RpoNumber rpo) {
+void InstructionSequence::EndBlock(RpoNumber rpo) {
int end = static_cast<int>(instructions_.size());
InstructionBlock* block = InstructionBlockAt(rpo);
if (block->code_start() == end) { // Empty block. Insert a nop.
@@ -646,6 +668,11 @@ void FrameStateDescriptor::SetType(size_t index, MachineType type) {
}
+std::ostream& operator<<(std::ostream& os, const RpoNumber& rpo) {
+ return os << rpo.ToSize();
+}
+
+
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionSequence& printable) {
const InstructionSequence& code = *printable.sequence_;
@@ -659,13 +686,12 @@ std::ostream& operator<<(std::ostream& os,
os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
}
for (int i = 0; i < code.InstructionBlockCount(); i++) {
- BasicBlock::RpoNumber rpo = BasicBlock::RpoNumber::FromInt(i);
+ RpoNumber rpo = RpoNumber::FromInt(i);
const InstructionBlock* block = code.InstructionBlockAt(rpo);
CHECK(block->rpo_number() == rpo);
- os << "RPO#" << block->rpo_number();
+ os << "B" << block->rpo_number();
os << ": AO#" << block->ao_number();
- os << ": B" << block->id();
if (block->IsDeferred()) os << " (deferred)";
if (block->IsLoopHeader()) {
os << " loop blocks: [" << block->rpo_number() << ", "
@@ -675,8 +701,7 @@ std::ostream& operator<<(std::ostream& os,
<< block->code_end() << ")\n predecessors:";
for (auto pred : block->predecessors()) {
- const InstructionBlock* pred_block = code.InstructionBlockAt(pred);
- os << " B" << pred_block->id();
+ os << " B" << pred.ToInt();
}
os << "\n";
@@ -703,8 +728,7 @@ std::ostream& operator<<(std::ostream& os,
}
for (auto succ : block->successors()) {
- const InstructionBlock* succ_block = code.InstructionBlockAt(succ);
- os << " B" << succ_block->id();
+ os << " B" << succ.ToInt();
}
os << "\n";
}
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index d04d0367f5..38fc433744 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -15,7 +15,6 @@
#include "src/compiler/instruction-codes.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/register-configuration.h"
-#include "src/compiler/schedule.h"
#include "src/compiler/source-position.h"
#include "src/zone-allocator.h"
@@ -23,6 +22,8 @@ namespace v8 {
namespace internal {
namespace compiler {
+class Schedule;
+
// A couple of reserved opcodes are used for internal use.
const InstructionCode kGapInstruction = -1;
const InstructionCode kSourcePositionInstruction = -2;
@@ -50,14 +51,11 @@ class InstructionOperand {
DOUBLE_REGISTER
};
- InstructionOperand() : virtual_register_(kInvalidVirtualRegister) {
- ConvertTo(INVALID, 0);
- }
+ InstructionOperand() { ConvertTo(INVALID, 0, kInvalidVirtualRegister); }
- InstructionOperand(Kind kind, int index)
- : virtual_register_(kInvalidVirtualRegister) {
- DCHECK(kind != INVALID);
- ConvertTo(kind, index);
+ InstructionOperand(Kind kind, int index) {
+ DCHECK(kind != UNALLOCATED && kind != INVALID);
+ ConvertTo(kind, index, kInvalidVirtualRegister);
}
static InstructionOperand* New(Zone* zone, Kind kind, int index) {
@@ -65,7 +63,11 @@ class InstructionOperand {
}
Kind kind() const { return KindField::decode(value_); }
- int index() const { return static_cast<int>(value_) >> KindField::kSize; }
+ // TODO(dcarney): move this to subkind operand.
+ int index() const {
+ DCHECK(kind() != UNALLOCATED && kind() != INVALID);
+ return static_cast<int64_t>(value_) >> IndexField::kShift;
+ }
#define INSTRUCTION_OPERAND_PREDICATE(name, type) \
bool Is##name() const { return kind() == type; }
INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
@@ -77,11 +79,13 @@ class InstructionOperand {
}
void ConvertTo(Kind kind, int index) {
- if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0);
- value_ = KindField::encode(kind);
- value_ |= bit_cast<unsigned>(index << KindField::kSize);
- DCHECK(this->index() == index);
- if (kind != UNALLOCATED) virtual_register_ = kInvalidVirtualRegister;
+ DCHECK(kind != UNALLOCATED && kind != INVALID);
+ ConvertTo(kind, index, kInvalidVirtualRegister);
+ }
+
+ // Useful for map/set keys.
+ bool operator<(const InstructionOperand& op) const {
+ return value_ < op.value_;
}
protected:
@@ -91,15 +95,28 @@ class InstructionOperand {
return new (buffer) SubKindOperand(op);
}
- InstructionOperand(Kind kind, int index, int virtual_register)
- : virtual_register_(virtual_register) {
- ConvertTo(kind, index);
+ InstructionOperand(Kind kind, int index, int virtual_register) {
+ ConvertTo(kind, index, virtual_register);
}
- typedef BitField<Kind, 0, 3> KindField;
- uint32_t value_;
- // TODO(dcarney): this should really be unsigned.
- int32_t virtual_register_;
+ void ConvertTo(Kind kind, int index, int virtual_register) {
+ if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0);
+ if (kind != UNALLOCATED) {
+ DCHECK(virtual_register == kInvalidVirtualRegister);
+ }
+ value_ = KindField::encode(kind);
+ value_ |=
+ VirtualRegisterField::encode(static_cast<uint32_t>(virtual_register));
+ value_ |= static_cast<int64_t>(index) << IndexField::kShift;
+ DCHECK(((kind == UNALLOCATED || kind == INVALID) && index == 0) ||
+ this->index() == index);
+ }
+
+ typedef BitField64<Kind, 0, 3> KindField;
+ typedef BitField64<uint32_t, 3, 32> VirtualRegisterField;
+ typedef BitField64<int32_t, 35, 29> IndexField;
+
+ uint64_t value_;
};
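
Editor's note: the separate 32-bit value_ and virtual_register_ fields are folded into one 64-bit word. A self-contained sketch of the layout the BitField64 typedefs above declare (simplified shifts in place of BitField64):

#include <cstdint>

// bits 0..2   kind (3)          bits 3..34  virtual register (32)
// bits 35..63 index (29, signed: decode with an arithmetic shift)
static uint64_t Pack(unsigned kind, uint32_t vreg, int32_t index) {
  return static_cast<uint64_t>(kind & 7u) |
         (static_cast<uint64_t>(vreg) << 3) |
         (static_cast<uint64_t>(static_cast<int64_t>(index)) << 35);
}

static int32_t UnpackIndex(uint64_t value) {
  return static_cast<int32_t>(static_cast<int64_t>(value) >> 35);
}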
struct PrintableInstructionOperand {
@@ -120,6 +137,7 @@ class UnallocatedOperand : public InstructionOperand {
FIXED_REGISTER,
FIXED_DOUBLE_REGISTER,
MUST_HAVE_REGISTER,
+ MUST_HAVE_SLOT,
SAME_AS_FIRST_INPUT
};
@@ -148,7 +166,7 @@ class UnallocatedOperand : public InstructionOperand {
: InstructionOperand(UNALLOCATED, 0, virtual_register) {
DCHECK(policy == FIXED_SLOT);
value_ |= BasicPolicyField::encode(policy);
- value_ |= static_cast<int32_t>(index) << FixedSlotIndexField::kShift;
+ value_ |= static_cast<int64_t>(index) << FixedSlotIndexField::kShift;
DCHECK(this->fixed_slot_index() == index);
}
@@ -196,35 +214,33 @@ class UnallocatedOperand : public InstructionOperand {
// because it accommodates a larger payload.
//
// For FIXED_SLOT policy:
- // +-----------------------------+
- // | slot_index | 0 | 001 |
- // +-----------------------------+
+ // +------------------------------------------------+
+ // | slot_index | 0 | virtual_register | 001 |
+ // +------------------------------------------------+
//
// For all other (extended) policies:
- // +----------------------------------+
- // | reg_index | L | PPP | 1 | 001 | L ... Lifetime
- // +----------------------------------+ P ... Policy
+ // +-----------------------------------------------------+
+ // | reg_index | L | PPP | 1 | virtual_register | 001 |
+ // +-----------------------------------------------------+
+ // L ... Lifetime
+ // P ... Policy
//
// The slot index is a signed value which requires us to decode it manually
// instead of using the BitField utility class.
- // The superclass has a KindField.
- STATIC_ASSERT(KindField::kSize == 3);
+ // All bits fit into the index field.
+ STATIC_ASSERT(IndexField::kShift == 35);
// BitFields for all unallocated operands.
- class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+ class BasicPolicyField : public BitField64<BasicPolicy, 35, 1> {};
// BitFields specific to BasicPolicy::FIXED_SLOT.
- class FixedSlotIndexField : public BitField<int, 4, 28> {};
+ class FixedSlotIndexField : public BitField64<int, 36, 28> {};
// BitFields specific to BasicPolicy::EXTENDED_POLICY.
- class ExtendedPolicyField : public BitField<ExtendedPolicy, 4, 3> {};
- class LifetimeField : public BitField<Lifetime, 7, 1> {};
- class FixedRegisterField : public BitField<int, 8, 6> {};
-
- static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
- static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
- static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
+ class ExtendedPolicyField : public BitField64<ExtendedPolicy, 36, 3> {};
+ class LifetimeField : public BitField64<Lifetime, 39, 1> {};
+ class FixedRegisterField : public BitField64<int, 40, 6> {};
// Predicates for the operand policy.
bool HasAnyPolicy() const {
@@ -239,6 +255,10 @@ class UnallocatedOperand : public InstructionOperand {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == MUST_HAVE_REGISTER;
}
+ bool HasSlotPolicy() const {
+ return basic_policy() == EXTENDED_POLICY &&
+ extended_policy() == MUST_HAVE_SLOT;
+ }
bool HasSameAsInputPolicy() const {
return basic_policy() == EXTENDED_POLICY &&
extended_policy() == SAME_AS_FIRST_INPUT;
@@ -268,7 +288,7 @@ class UnallocatedOperand : public InstructionOperand {
// [fixed_slot_index]: Only for FIXED_SLOT.
int fixed_slot_index() const {
DCHECK(HasFixedSlotPolicy());
- return static_cast<int>(bit_cast<int32_t>(value_) >>
+ return static_cast<int>(static_cast<int64_t>(value_) >>
FixedSlotIndexField::kShift);
}
@@ -281,13 +301,13 @@ class UnallocatedOperand : public InstructionOperand {
// [virtual_register]: The virtual register ID for this operand.
int32_t virtual_register() const {
DCHECK_EQ(UNALLOCATED, kind());
- return virtual_register_;
+ return static_cast<int32_t>(VirtualRegisterField::decode(value_));
}
// TODO(dcarney): remove this.
void set_virtual_register(int32_t id) {
DCHECK_EQ(UNALLOCATED, kind());
- virtual_register_ = id;
+ value_ = VirtualRegisterField::update(value_, static_cast<uint32_t>(id));
}
// [lifetime]: Only for non-FIXED_SLOT.
@@ -391,6 +411,12 @@ class ParallelMove FINAL : public ZoneObject {
return &move_operands_;
}
+ // Prepare this ParallelMove to insert {move} as if it happened in a
+ // subsequent ParallelMove. move->source() may be changed. The returned
+ // MoveOperands, which points directly into move_operands_, must be
+ // eliminated before any further mutation.
+ MoveOperands* PrepareInsertAfter(MoveOperands* move) const;
+
private:
ZoneList<MoveOperands> move_operands_;
};
@@ -487,7 +513,7 @@ class Instruction {
return FlagsConditionField::decode(opcode());
}
- // TODO(titzer): make control and call into flags.
+ // TODO(titzer): make call into a flag.
static Instruction* New(Zone* zone, InstructionCode opcode) {
return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
}
@@ -509,17 +535,10 @@ class Instruction {
opcode, output_count, outputs, input_count, inputs, temp_count, temps);
}
- // TODO(titzer): another holdover from lithium days; register allocator
- // should not need to know about control instructions.
- Instruction* MarkAsControl() {
- bit_field_ = IsControlField::update(bit_field_, true);
- return this;
- }
Instruction* MarkAsCall() {
bit_field_ = IsCallField::update(bit_field_, true);
return this;
}
- bool IsControl() const { return IsControlField::decode(bit_field_); }
bool IsCall() const { return IsCallField::decode(bit_field_); }
bool NeedsPointerMap() const { return IsCall(); }
bool HasPointerMap() const { return pointer_map_ != NULL; }
@@ -562,7 +581,6 @@ class Instruction {
typedef BitField<size_t, 8, 16> InputCountField;
typedef BitField<size_t, 24, 6> TempCountField;
typedef BitField<bool, 30, 1> IsCallField;
- typedef BitField<bool, 31, 1> IsControlField;
InstructionCode opcode_;
uint32_t bit_field_;
@@ -587,12 +605,10 @@ std::ostream& operator<<(std::ostream& os, const PrintableInstruction& instr);
class GapInstruction : public Instruction {
public:
enum InnerPosition {
- BEFORE,
START,
END,
- AFTER,
- FIRST_INNER_POSITION = BEFORE,
- LAST_INNER_POSITION = AFTER
+ FIRST_INNER_POSITION = START,
+ LAST_INNER_POSITION = END
};
ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
@@ -631,10 +647,8 @@ class GapInstruction : public Instruction {
protected:
explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
- parallel_moves_[BEFORE] = NULL;
parallel_moves_[START] = NULL;
parallel_moves_[END] = NULL;
- parallel_moves_[AFTER] = NULL;
}
private:
@@ -675,6 +689,39 @@ class SourcePositionInstruction FINAL : public Instruction {
};
+class RpoNumber FINAL {
+ public:
+ static const int kInvalidRpoNumber = -1;
+ int ToInt() const {
+ DCHECK(IsValid());
+ return index_;
+ }
+ size_t ToSize() const {
+ DCHECK(IsValid());
+ return static_cast<size_t>(index_);
+ }
+ bool IsValid() const { return index_ >= 0; }
+ static RpoNumber FromInt(int index) { return RpoNumber(index); }
+ static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
+
+ bool IsNext(const RpoNumber other) const {
+ DCHECK(IsValid());
+ return other.index_ == this->index_ + 1;
+ }
+
+ bool operator==(RpoNumber other) const {
+ return this->index_ == other.index_;
+ }
+
+ private:
+ explicit RpoNumber(int32_t index) : index_(index) {}
+ int32_t index_;
+};
+
+
+std::ostream& operator<<(std::ostream&, const RpoNumber&);
+
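Editor's note: RpoNumber replaces BasicBlock::RpoNumber throughout the rest of this patch; a short usage sketch exercising only the interface declared above:

RpoNumber a = RpoNumber::FromInt(2);
RpoNumber b = RpoNumber::FromInt(3);
DCHECK(a.IsValid());
DCHECK(a.IsNext(b));                    // b directly follows a in RPO
DCHECK(!(a == b));
DCHECK(!RpoNumber::Invalid().IsValid());
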
+
class Constant FINAL {
public:
enum Type {
@@ -687,7 +734,7 @@ class Constant FINAL {
kRpoNumber
};
- explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
+ explicit Constant(int32_t v);
explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
explicit Constant(float v) : type_(kFloat32), value_(bit_cast<int32_t>(v)) {}
explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
@@ -695,8 +742,7 @@ class Constant FINAL {
: type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
explicit Constant(Handle<HeapObject> obj)
: type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
- explicit Constant(BasicBlock::RpoNumber rpo)
- : type_(kRpoNumber), value_(rpo.ToInt()) {}
+ explicit Constant(RpoNumber rpo) : type_(kRpoNumber), value_(rpo.ToInt()) {}
Type type() const { return type_; }
@@ -729,9 +775,9 @@ class Constant FINAL {
return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
}
- BasicBlock::RpoNumber ToRpoNumber() const {
+ RpoNumber ToRpoNumber() const {
DCHECK_EQ(kRpoNumber, type());
- return BasicBlock::RpoNumber::FromInt(static_cast<int>(value_));
+ return RpoNumber::FromInt(static_cast<int>(value_));
}
Handle<HeapObject> ToHeapObject() const {
@@ -815,10 +861,8 @@ class PhiInstruction FINAL : public ZoneObject {
// Analogue of BasicBlock for Instructions instead of Nodes.
class InstructionBlock FINAL : public ZoneObject {
public:
- InstructionBlock(Zone* zone, BasicBlock::Id id,
- BasicBlock::RpoNumber rpo_number,
- BasicBlock::RpoNumber loop_header,
- BasicBlock::RpoNumber loop_end, bool deferred);
+ InstructionBlock(Zone* zone, RpoNumber rpo_number, RpoNumber loop_header,
+ RpoNumber loop_end, bool deferred);
// Instruction indexes (used by the register allocator).
int first_instruction_index() const {
@@ -842,23 +886,22 @@ class InstructionBlock FINAL : public ZoneObject {
bool IsDeferred() const { return deferred_; }
- BasicBlock::Id id() const { return id_; }
- BasicBlock::RpoNumber ao_number() const { return ao_number_; }
- BasicBlock::RpoNumber rpo_number() const { return rpo_number_; }
- BasicBlock::RpoNumber loop_header() const { return loop_header_; }
- BasicBlock::RpoNumber loop_end() const {
+ RpoNumber ao_number() const { return ao_number_; }
+ RpoNumber rpo_number() const { return rpo_number_; }
+ RpoNumber loop_header() const { return loop_header_; }
+ RpoNumber loop_end() const {
DCHECK(IsLoopHeader());
return loop_end_;
}
inline bool IsLoopHeader() const { return loop_end_.IsValid(); }
- typedef ZoneVector<BasicBlock::RpoNumber> Predecessors;
+ typedef ZoneVector<RpoNumber> Predecessors;
Predecessors& predecessors() { return predecessors_; }
const Predecessors& predecessors() const { return predecessors_; }
size_t PredecessorCount() const { return predecessors_.size(); }
- size_t PredecessorIndexOf(BasicBlock::RpoNumber rpo_number) const;
+ size_t PredecessorIndexOf(RpoNumber rpo_number) const;
- typedef ZoneVector<BasicBlock::RpoNumber> Successors;
+ typedef ZoneVector<RpoNumber> Successors;
Successors& successors() { return successors_; }
const Successors& successors() const { return successors_; }
size_t SuccessorCount() const { return successors_.size(); }
@@ -867,19 +910,16 @@ class InstructionBlock FINAL : public ZoneObject {
const PhiInstructions& phis() const { return phis_; }
void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }
- void set_ao_number(BasicBlock::RpoNumber ao_number) {
- ao_number_ = ao_number;
- }
+ void set_ao_number(RpoNumber ao_number) { ao_number_ = ao_number; }
private:
Successors successors_;
Predecessors predecessors_;
PhiInstructions phis_;
- const BasicBlock::Id id_;
- BasicBlock::RpoNumber ao_number_; // Assembly order number.
- const BasicBlock::RpoNumber rpo_number_;
- const BasicBlock::RpoNumber loop_header_;
- const BasicBlock::RpoNumber loop_end_;
+ RpoNumber ao_number_; // Assembly order number.
+ const RpoNumber rpo_number_;
+ const RpoNumber loop_header_;
+ const RpoNumber loop_end_;
int32_t code_start_; // start index of arch-specific code.
int32_t code_end_; // end index of arch-specific code.
const bool deferred_; // Block contains deferred code.
@@ -921,7 +961,7 @@ class InstructionSequence FINAL : public ZoneObject {
return static_cast<int>(instruction_blocks_->size());
}
- InstructionBlock* InstructionBlockAt(BasicBlock::RpoNumber rpo_number) {
+ InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) {
return instruction_blocks_->at(rpo_number.ToSize());
}
@@ -930,8 +970,7 @@ class InstructionSequence FINAL : public ZoneObject {
->last_instruction_index();
}
- const InstructionBlock* InstructionBlockAt(
- BasicBlock::RpoNumber rpo_number) const {
+ const InstructionBlock* InstructionBlockAt(RpoNumber rpo_number) const {
return instruction_blocks_->at(rpo_number.ToSize());
}
@@ -945,7 +984,7 @@ class InstructionSequence FINAL : public ZoneObject {
void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
- GapInstruction* GetBlockStart(BasicBlock::RpoNumber rpo) const;
+ GapInstruction* GetBlockStart(RpoNumber rpo) const;
typedef InstructionDeque::const_iterator const_iterator;
const_iterator begin() const { return instructions_.begin(); }
@@ -968,8 +1007,8 @@ class InstructionSequence FINAL : public ZoneObject {
// Used by the instruction selector while adding instructions.
int AddInstruction(Instruction* instr);
- void StartBlock(BasicBlock::RpoNumber rpo);
- void EndBlock(BasicBlock::RpoNumber rpo);
+ void StartBlock(RpoNumber rpo);
+ void EndBlock(RpoNumber rpo);
int AddConstant(int virtual_register, Constant constant) {
// TODO(titzer): allow RPO numbers as constants?
@@ -1014,7 +1053,7 @@ class InstructionSequence FINAL : public ZoneObject {
FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
int GetFrameStateDescriptorCount();
- BasicBlock::RpoNumber InputRpo(Instruction* instr, size_t index) {
+ RpoNumber InputRpo(Instruction* instr, size_t index) {
InstructionOperand* operand = instr->InputAt(index);
Constant constant = operand->IsImmediate() ? GetImmediate(operand->index())
: GetConstant(operand->index());
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index a89f4a3255..12b0e2f6cc 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -100,38 +100,6 @@ JSBuiltinReducer::JSBuiltinReducer(JSGraph* jsgraph)
: jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
-// ECMA-262, section 15.8.2.1.
-Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Unsigned32())) {
- // Math.abs(a:uint32) -> a
- return Replace(r.left());
- }
- if (r.InputsMatchOne(Type::Number())) {
- // Math.abs(a:number) -> (a > 0 ? a : 0 - a)
- Node* const value = r.left();
- Node* const zero = jsgraph()->ZeroConstant();
- return Replace(graph()->NewNode(
- common()->Select(kMachNone),
- graph()->NewNode(simplified()->NumberLessThan(), zero, value), value,
- graph()->NewNode(simplified()->NumberSubtract(), zero, value)));
- }
- return NoChange();
-}
-
-
-// ECMA-262, section 15.8.2.17.
-Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.sqrt(a:number) -> Float64Sqrt(a)
- Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-
// ECMA-262, section 15.8.2.11.
Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
JSCallReduction r(node);
@@ -184,52 +152,18 @@ Reduction JSBuiltinReducer::ReduceMathFround(Node* node) {
}
-// ES6 draft 10-14-14, section 20.2.2.16.
-Reduction JSBuiltinReducer::ReduceMathFloor(Node* node) {
- if (!machine()->HasFloat64Floor()) return NoChange();
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.floor(a:number) -> Float64Floor(a)
- Node* value = graph()->NewNode(machine()->Float64Floor(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-
-// ES6 draft 10-14-14, section 20.2.2.10.
-Reduction JSBuiltinReducer::ReduceMathCeil(Node* node) {
- if (!machine()->HasFloat64Ceil()) return NoChange();
- JSCallReduction r(node);
- if (r.InputsMatchOne(Type::Number())) {
- // Math.ceil(a:number) -> Float64Ceil(a)
- Node* value = graph()->NewNode(machine()->Float64Ceil(), r.left());
- return Replace(value);
- }
- return NoChange();
-}
-
-
Reduction JSBuiltinReducer::Reduce(Node* node) {
JSCallReduction r(node);
// Dispatch according to the BuiltinFunctionId if present.
if (!r.HasBuiltinFunctionId()) return NoChange();
switch (r.GetBuiltinFunctionId()) {
- case kMathAbs:
- return ReplaceWithPureReduction(node, ReduceMathAbs(node));
- case kMathSqrt:
- return ReplaceWithPureReduction(node, ReduceMathSqrt(node));
case kMathMax:
return ReplaceWithPureReduction(node, ReduceMathMax(node));
case kMathImul:
return ReplaceWithPureReduction(node, ReduceMathImul(node));
case kMathFround:
return ReplaceWithPureReduction(node, ReduceMathFround(node));
- case kMathFloor:
- return ReplaceWithPureReduction(node, ReduceMathFloor(node));
- case kMathCeil:
- return ReplaceWithPureReduction(node, ReduceMathCeil(node));
default:
break;
}
diff --git a/deps/v8/src/compiler/js-builtin-reducer.h b/deps/v8/src/compiler/js-builtin-reducer.h
index ac6f266eed..42221e9727 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.h
+++ b/deps/v8/src/compiler/js-builtin-reducer.h
@@ -26,13 +26,9 @@ class JSBuiltinReducer FINAL : public Reducer {
Reduction Reduce(Node* node) FINAL;
private:
- Reduction ReduceMathAbs(Node* node);
- Reduction ReduceMathSqrt(Node* node);
Reduction ReduceMathMax(Node* node);
Reduction ReduceMathImul(Node* node);
Reduction ReduceMathFround(Node* node);
- Reduction ReduceMathFloor(Node* node);
- Reduction ReduceMathCeil(Node* node);
JSGraph* jsgraph() const { return jsgraph_; }
Graph* graph() const;
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 4720c582ec..e1dac82b2c 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -6,6 +6,7 @@
#include "src/code-stubs.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
@@ -20,6 +21,9 @@ JSGenericLowering::JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph)
: is_typing_enabled_(is_typing_enabled), jsgraph_(jsgraph) {}
+JSGenericLowering::~JSGenericLowering() {}
+
+
Reduction JSGenericLowering::Reduce(Node* node) {
switch (node->opcode()) {
#define DECLARE_CASE(x) \
@@ -101,13 +105,12 @@ REPLACE_RUNTIME_CALL(JSCreateScriptContext, Runtime::kAbort)
#define REPLACE_UNIMPLEMENTED(op) \
void JSGenericLowering::Lower##op(Node* node) { UNIMPLEMENTED(); }
REPLACE_UNIMPLEMENTED(JSYield)
-REPLACE_UNIMPLEMENTED(JSDebugger)
#undef REPLACE_UNIMPLEMENTED
static CallDescriptor::Flags FlagsForNode(Node* node) {
CallDescriptor::Flags result = CallDescriptor::kNoFlags;
- if (OperatorProperties::HasFrameStateInput(node->op())) {
+ if (OperatorProperties::GetFrameStateInputCount(node->op()) > 0) {
result |= CallDescriptor::kNeedsFrameState;
}
return result;
@@ -118,7 +121,8 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
Callable callable = CodeFactory::CompareIC(isolate(), token);
CallDescriptor* desc_compare = Linkage::GetStubCallDescriptor(
isolate(), zone(), callable.descriptor(), 0,
- CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node));
+ CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node),
+ Operator::kNoProperties, kMachInt32);
// Create a new call node asking a CompareIC for help.
NodeVector inputs(zone());
@@ -130,14 +134,14 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
if (node->op()->HasProperty(Operator::kPure)) {
// A pure (strict) comparison doesn't have an effect, control or frame
// state. But for the graph, we need to add control and effect inputs.
- DCHECK(!OperatorProperties::HasFrameStateInput(node->op()));
+ DCHECK(OperatorProperties::GetFrameStateInputCount(node->op()) == 0);
inputs.push_back(graph()->start());
inputs.push_back(graph()->start());
} else {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()) ==
+ DCHECK((OperatorProperties::GetFrameStateInputCount(node->op()) == 1) ==
FLAG_turbo_deoptimization);
if (FLAG_turbo_deoptimization) {
- inputs.push_back(NodeProperties::GetFrameStateInput(node));
+ inputs.push_back(NodeProperties::GetFrameStateInput(node, 0));
}
inputs.push_back(NodeProperties::GetEffectInput(node));
inputs.push_back(NodeProperties::GetControlInput(node));
@@ -198,12 +202,26 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
CallDescriptor::Flags flags) {
Operator::Properties properties = node->op()->properties();
- CallDescriptor* desc =
- Linkage::GetStubCallDescriptor(isolate(), zone(), callable.descriptor(),
- 0, flags | FlagsForNode(node), properties);
+ flags |= FlagsForNode(node);
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 0, flags, properties);
+ const Operator* new_op = common()->Call(desc);
+
+ // Take care of frame states.
+ int old_frame_state_count =
+ OperatorProperties::GetFrameStateInputCount(node->op());
+ int new_frame_state_count =
+ (flags & CallDescriptor::kNeedsFrameState) ? 1 : 0;
+ DCHECK_GE(old_frame_state_count, new_frame_state_count);
+ // If there are extra frame states, get rid of them.
+ for (int i = new_frame_state_count; i < old_frame_state_count; i++) {
+ node->RemoveInput(NodeProperties::FirstFrameStateIndex(node) +
+ new_frame_state_count);
+ }
+
Node* stub_code = jsgraph()->HeapConstant(callable.code());
node->InsertInput(zone(), 0, stub_code);
- node->set_op(common()->Call(desc));
+ node->set_op(new_op);
}
@@ -291,7 +309,8 @@ void JSGenericLowering::LowerJSToObject(Node* node) {
void JSGenericLowering::LowerJSLoadProperty(Node* node) {
const LoadPropertyParameters& p = LoadPropertyParametersOf(node->op());
- Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
+ Callable callable =
+ CodeFactory::KeyedLoadICInOptimizedCode(isolate(), UNINITIALIZED);
if (FLAG_vector_ics) {
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
node->InsertInput(zone(), 3,
@@ -303,8 +322,8 @@ void JSGenericLowering::LowerJSLoadProperty(Node* node) {
void JSGenericLowering::LowerJSLoadNamed(Node* node) {
const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
- Callable callable =
- CodeFactory::LoadICInOptimizedCode(isolate(), p.contextual_mode());
+ Callable callable = CodeFactory::LoadICInOptimizedCode(
+ isolate(), p.contextual_mode(), UNINITIALIZED);
node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
if (FLAG_vector_ics) {
node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
@@ -317,7 +336,8 @@ void JSGenericLowering::LowerJSLoadNamed(Node* node) {
void JSGenericLowering::LowerJSStoreProperty(Node* node) {
LanguageMode language_mode = OpParameter<LanguageMode>(node);
- Callable callable = CodeFactory::KeyedStoreIC(isolate(), language_mode);
+ Callable callable = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), language_mode, UNINITIALIZED);
ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
}
@@ -478,6 +498,59 @@ void JSGenericLowering::LowerJSCallRuntime(Node* node) {
ReplaceWithRuntimeCall(node, p.id(), static_cast<int>(p.arity()));
}
+
+void JSGenericLowering::LowerJSStackCheck(Node* node) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* limit = graph()->NewNode(
+ machine()->Load(kMachPtr),
+ jsgraph()->ExternalConstant(
+ ExternalReference::address_of_stack_limit(isolate())),
+ jsgraph()->IntPtrConstant(0), effect, control);
+ Node* pointer = graph()->NewNode(machine()->LoadStackPointer());
+
+ Node* check = graph()->NewNode(machine()->UintLessThan(), limit, pointer);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = effect;
+
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ NodeProperties::ReplaceControlInput(node, if_false);
+ Node* efalse = node;
+
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+
+ // Relax controls of {node}, i.e. make it free floating.
+ NodeProperties::ReplaceWithValue(node, node, ephi, merge);
+ NodeProperties::ReplaceEffectInput(ephi, efalse, 1);
+
+ // Turn the stack check into a runtime call.
+ ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
+}
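In source terms, the diamond above is a fast-path stack probe with the runtime call on the unlikely edge. A hedged C++ analog follows; StackGuard() is a stand-in for the Runtime::kStackGuard call, not a real V8 entry point:

#include <cstdint>

void StackGuard();  // stand-in for the Runtime::kStackGuard call

// UintLessThan(limit, sp) is the fast check carrying BranchHint::kTrue;
// only the false edge reaches the runtime, matching the graph above.
inline void JSStackCheckAnalog(uintptr_t sp, uintptr_t stack_limit) {
  if (!(stack_limit < sp)) {  // fails on overflow or a pending interrupt
    StackGuard();
  }
}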
+
+
+Zone* JSGenericLowering::zone() const { return graph()->zone(); }
+
+
+Isolate* JSGenericLowering::isolate() const { return jsgraph()->isolate(); }
+
+
+Graph* JSGenericLowering::graph() const { return jsgraph()->graph(); }
+
+
+CommonOperatorBuilder* JSGenericLowering::common() const {
+ return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* JSGenericLowering::machine() const {
+ return jsgraph()->machine();
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler/js-generic-lowering.h b/deps/v8/src/compiler/js-generic-lowering.h
index 10057eb9e1..5ca09ef5ab 100644
--- a/deps/v8/src/compiler/js-generic-lowering.h
+++ b/deps/v8/src/compiler/js-generic-lowering.h
@@ -5,11 +5,8 @@
#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
#define V8_COMPILER_JS_GENERIC_LOWERING_H_
-#include "src/allocation.h"
#include "src/code-factory.h"
-#include "src/compiler/graph.h"
#include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
#include "src/compiler/opcodes.h"
@@ -19,6 +16,7 @@ namespace compiler {
// Forward declarations.
class CommonOperatorBuilder;
+class JSGraph;
class MachineOperatorBuilder;
class Linkage;
@@ -26,8 +24,8 @@ class Linkage;
// Lowers JS-level operators to runtime and IC calls in the "generic" case.
class JSGenericLowering FINAL : public Reducer {
public:
- JSGenericLowering(bool is_typing_enabled, JSGraph* graph);
- ~JSGenericLowering() FINAL {}
+ JSGenericLowering(bool is_typing_enabled, JSGraph* jsgraph);
+ ~JSGenericLowering() FINAL;
Reduction Reduce(Node* node) FINAL;
@@ -46,16 +44,16 @@ class JSGenericLowering FINAL : public Reducer {
// Helper for optimization of JSCallFunction.
bool TryLowerDirectJSCall(Node* node);
- Zone* zone() const { return graph()->zone(); }
- Isolate* isolate() const { return jsgraph()->isolate(); }
+ Zone* zone() const;
+ Isolate* isolate() const;
JSGraph* jsgraph() const { return jsgraph_; }
- Graph* graph() const { return jsgraph()->graph(); }
- CommonOperatorBuilder* common() const { return jsgraph()->common(); }
- MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
+ Graph* graph() const;
+ CommonOperatorBuilder* common() const;
+ MachineOperatorBuilder* machine() const;
private:
- bool is_typing_enabled_;
- JSGraph* jsgraph_;
+ bool const is_typing_enabled_;
+ JSGraph* const jsgraph_;
};
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index 649b0d68d1..1f3c25d06a 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -204,7 +204,7 @@ Node* JSGraph::EmptyFrameState() {
if (!empty_frame_state_.is_set()) {
Node* values = graph()->NewNode(common()->StateValues(0));
Node* state_node = graph()->NewNode(
- common()->FrameState(JS_FRAME, BailoutId(0),
+ common()->FrameState(JS_FRAME, BailoutId::None(),
OutputFrameStateCombine::Ignore()),
values, values, values, NoContextConstant(), UndefinedConstant());
empty_frame_state_.set(state_node);
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index 91d0823dae..6c7b66681d 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -2,29 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/js-inlining.h"
+
#include "src/ast.h"
#include "src/ast-numbering.h"
-#include "src/compiler/access-builder.h"
+#include "src/compiler/all-nodes.h"
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
-#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-context-specialization.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/typer.h"
+#include "src/compiler/operator-properties.h"
#include "src/full-codegen.h"
#include "src/parser.h"
#include "src/rewriter.h"
#include "src/scopes.h"
-
namespace v8 {
namespace internal {
namespace compiler {
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_inlining) PrintF(__VA_ARGS__); \
+ } while (false)
+
// Provides convenience accessors for calls to JS functions.
class JSCallFunctionAccessor {
@@ -49,35 +52,14 @@ class JSCallFunctionAccessor {
return value_inputs - 2;
}
- Node* frame_state() { return NodeProperties::GetFrameStateInput(call_); }
+ Node* frame_state() { return NodeProperties::GetFrameStateInput(call_, 0); }
private:
Node* call_;
};
-Reduction JSInliner::Reduce(Node* node) {
- if (node->opcode() != IrOpcode::kJSCallFunction) return NoChange();
-
- JSCallFunctionAccessor call(node);
- HeapObjectMatcher<JSFunction> match(call.jsfunction());
- if (!match.HasValue()) return NoChange();
-
- Handle<JSFunction> jsfunction = match.Value().handle();
-
- if (jsfunction->shared()->native()) {
- if (FLAG_trace_turbo_inlining) {
- SmartArrayPointer<char> name =
- jsfunction->shared()->DebugName()->ToCString();
- PrintF("Not Inlining %s into %s because inlinee is native\n", name.get(),
- info_->shared_info()->DebugName()->ToCString().get());
- }
- return NoChange();
- }
-
- return TryInlineJSCall(node, jsfunction);
-}
-
+namespace {
// A facade on a JSFunction's graph to facilitate inlining. It assumes
// that the function graph has only one return statement, and provides
@@ -100,6 +82,11 @@ class Inlinee {
Node* value_output() {
return NodeProperties::GetValueInput(unique_return(), 0);
}
+  // Return the control output of the graph, that is, the control input of
+  // the return statement of the inlinee.
+ Node* control_output() {
+ return NodeProperties::GetControlInput(unique_return(), 0);
+ }
// Return the unique return statement of the graph.
Node* unique_return() {
Node* unique_return = NodeProperties::GetControlInput(end_);
@@ -155,7 +142,7 @@ void Inlinee::UnifyReturn(JSGraph* jsgraph) {
values.push_back(NodeProperties::GetValueInput(input, 0));
effects.push_back(NodeProperties::GetEffectInput(input));
edge.UpdateTo(NodeProperties::GetControlInput(input));
- input->RemoveAllInputs();
+ input->NullAllInputs();
break;
default:
UNREACHABLE();
@@ -174,86 +161,69 @@ void Inlinee::UnifyReturn(JSGraph* jsgraph) {
}
-class CopyVisitor : public NullNodeVisitor {
+class CopyVisitor {
public:
CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
- : copies_(source_graph->NodeCount(), NULL, temp_zone),
- sentinels_(source_graph->NodeCount(), NULL, temp_zone),
+ : sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "Sentinel", 0, 0,
+ 0, 0, 0, 0),
+ sentinel_(target_graph->NewNode(&sentinel_op_)),
+ copies_(source_graph->NodeCount(), sentinel_, temp_zone),
source_graph_(source_graph),
target_graph_(target_graph),
- temp_zone_(temp_zone),
- sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, "sentinel", 0, 0,
- 0, 0, 0, 0) {}
+ temp_zone_(temp_zone) {}
+
+ Node* GetCopy(Node* orig) { return copies_[orig->id()]; }
- void Post(Node* original) {
+ void CopyGraph() {
NodeVector inputs(temp_zone_);
- for (Node* const node : original->inputs()) {
- inputs.push_back(GetCopy(node));
+ // TODO(bmeurer): AllNodes should be turned into something like
+ // Graph::CollectNodesReachableFromEnd() and the gray set stuff should be
+ // removed since it's only needed by the visualizer.
+ AllNodes all(temp_zone_, source_graph_);
+ // Copy all nodes reachable from end.
+ for (Node* orig : all.live) {
+ Node* copy = GetCopy(orig);
+ if (copy != sentinel_) {
+ // Mapping already exists.
+ continue;
+ }
+ // Copy the node.
+ inputs.clear();
+ for (Node* input : orig->inputs()) inputs.push_back(copies_[input->id()]);
+ copy = target_graph_->NewNode(orig->op(), orig->InputCount(),
+ inputs.empty() ? nullptr : &inputs[0]);
+ copies_[orig->id()] = copy;
}
-
- // Reuse the operator in the copy. This assumes that op lives in a zone
- // that lives longer than graph()'s zone.
- Node* copy =
- target_graph_->NewNode(original->op(), static_cast<int>(inputs.size()),
- (inputs.empty() ? NULL : &inputs.front()));
- copies_[original->id()] = copy;
- }
-
- Node* GetCopy(Node* original) {
- Node* copy = copies_[original->id()];
- if (copy == NULL) {
- copy = GetSentinel(original);
+  // Second pass: patch inputs that are still mapped to the sentinel.
+ for (Node* orig : all.live) {
+ Node* copy = copies_[orig->id()];
+ for (int i = 0; i < copy->InputCount(); ++i) {
+ Node* input = copy->InputAt(i);
+ if (input == sentinel_) {
+ copy->ReplaceInput(i, GetCopy(orig->InputAt(i)));
+ }
+ }
}
- DCHECK(copy);
- return copy;
- }
-
- void CopyGraph() {
- source_graph_->VisitNodeInputsFromEnd(this);
- ReplaceSentinels();
}
- const NodeVector& copies() { return copies_; }
+ const NodeVector& copies() const { return copies_; }
private:
- void ReplaceSentinels() {
- for (NodeId id = 0; id < source_graph_->NodeCount(); ++id) {
- Node* sentinel = sentinels_[id];
- if (sentinel == NULL) continue;
- Node* copy = copies_[id];
- DCHECK(copy);
- sentinel->ReplaceUses(copy);
- }
- }
-
- Node* GetSentinel(Node* original) {
- if (sentinels_[original->id()] == NULL) {
- sentinels_[original->id()] = target_graph_->NewNode(&sentinel_op_);
- }
- return sentinels_[original->id()];
- }
-
+ Operator const sentinel_op_;
+ Node* const sentinel_;
NodeVector copies_;
- NodeVector sentinels_;
- Graph* source_graph_;
- Graph* target_graph_;
- Zone* temp_zone_;
- Operator sentinel_op_;
+ Graph* const source_graph_;
+ Graph* const target_graph_;
+ Zone* const temp_zone_;
};
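The rewritten CopyVisitor replaces the visitor-with-lazy-sentinels scheme with two plain passes over the live-node set. A toy version of the same strategy, assuming every input of a live node is itself live (illustrative types, not the V8 classes; ownership is elided for brevity):

#include <cstddef>
#include <unordered_map>
#include <vector>

struct Node {
  std::vector<Node*> inputs;
};

std::unordered_map<Node*, Node*> CopyGraph(const std::vector<Node*>& live) {
  Node sentinel;
  std::unordered_map<Node*, Node*> copies;
  auto lookup = [&](Node* n) -> Node* {
    auto it = copies.find(n);
    return it == copies.end() ? &sentinel : it->second;
  };
  // Pass 1: copy every node; inputs without a copy yet point at the sentinel.
  for (Node* orig : live) {
    Node* copy = new Node;
    for (Node* input : orig->inputs) copy->inputs.push_back(lookup(input));
    copies[orig] = copy;
  }
  // Pass 2: patch sentinel inputs to the copies created in pass 1.
  for (Node* orig : live) {
    Node* copy = copies[orig];
    for (size_t i = 0; i < copy->inputs.size(); ++i) {
      if (copy->inputs[i] == &sentinel) copy->inputs[i] = copies[orig->inputs[i]];
    }
  }
  return copies;
}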
Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
// The scheduler is smart enough to place our code; we just ensure {control}
- // becomes the control input of the start of the inlinee.
+ // becomes the control input of the start of the inlinee, and {effect} becomes
+ // the effect input of the start of the inlinee.
Node* control = NodeProperties::GetControlInput(call);
-
- // The inlinee uses the context from the JSFunction object. This will
- // also be the effect dependency for the inlinee as it produces an effect.
- SimplifiedOperatorBuilder simplified(jsgraph->zone());
- Node* context = jsgraph->graph()->NewNode(
- simplified.LoadField(AccessBuilder::ForJSFunctionContext()),
- NodeProperties::GetValueInput(call, 0),
- NodeProperties::GetEffectInput(call), control);
+ Node* effect = NodeProperties::GetEffectInput(call);
// Context is last argument.
int inlinee_context_index = static_cast<int>(total_parameters()) - 1;
@@ -271,9 +241,9 @@ Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
// projection but not the context, so rewire the input.
NodeProperties::ReplaceWithValue(use, call->InputAt(index));
} else if (index == inlinee_context_index) {
- // This is the context projection, rewire it to the context from the
- // JSFunction object.
- NodeProperties::ReplaceWithValue(use, context);
+ // TODO(turbofan): We always context specialize inlinees currently, so
+ // we should never get here.
+ UNREACHABLE();
} else if (index < inlinee_context_index) {
// Call has fewer arguments than required, fill with undefined.
NodeProperties::ReplaceWithValue(use, jsgraph->UndefinedConstant());
@@ -285,7 +255,7 @@ Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
}
default:
if (NodeProperties::IsEffectEdge(edge)) {
- edge.UpdateTo(context);
+ edge.UpdateTo(effect);
} else if (NodeProperties::IsControlEdge(edge)) {
edge.UpdateTo(control);
} else {
@@ -295,10 +265,14 @@ Reduction Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
}
}
- NodeProperties::ReplaceWithValue(call, value_output(), effect_output());
+ NodeProperties::ReplaceWithValue(call, value_output(), effect_output(),
+ control_output());
+
return Reducer::Replace(value_output());
}
+} // namespace
+
void JSInliner::AddClosureToFrameState(Node* frame_state,
Handle<JSFunction> jsfunction) {
@@ -333,38 +307,52 @@ Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
}
-Reduction JSInliner::TryInlineJSCall(Node* call_node,
- Handle<JSFunction> function) {
- JSCallFunctionAccessor call(call_node);
- CompilationInfoWithZone info(function);
+Reduction JSInliner::Reduce(Node* node) {
+ if (node->opcode() != IrOpcode::kJSCallFunction) return NoChange();
+
+ JSCallFunctionAccessor call(node);
+ HeapObjectMatcher<JSFunction> match(call.jsfunction());
+ if (!match.HasValue()) return NoChange();
- if (!Compiler::ParseAndAnalyze(&info)) return NoChange();
+ Handle<JSFunction> function = match.Value().handle();
+ if (!function->IsJSFunction()) return NoChange();
+ if (mode_ == kBuiltinsInlining && !function->shared()->inline_builtin()) {
+ return NoChange();
+ }
+
+ Zone zone;
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info);
+
+ if (!Compiler::ParseAndAnalyze(info.parse_info())) return NoChange();
if (!Compiler::EnsureDeoptimizationSupport(&info)) return NoChange();
if (info.scope()->arguments() != NULL && is_sloppy(info.language_mode())) {
// For now do not inline functions that use their arguments array.
- SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
- if (FLAG_trace_turbo_inlining) {
- PrintF(
- "Not Inlining %s into %s because inlinee uses arguments "
- "array\n",
- name.get(), info_->shared_info()->DebugName()->ToCString().get());
- }
+ TRACE("Not Inlining %s into %s because inlinee uses arguments array\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
return NoChange();
}
- if (FLAG_trace_turbo_inlining) {
- SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
- PrintF("Inlining %s into %s\n", name.get(),
- info_->shared_info()->DebugName()->ToCString().get());
- }
+ TRACE("Inlining %s into %s\n",
+ function->shared()->DebugName()->ToCString().get(),
+ info_->shared_info()->DebugName()->ToCString().get());
Graph graph(info.zone());
JSGraph jsgraph(info.isolate(), &graph, jsgraph_->common(),
jsgraph_->javascript(), jsgraph_->machine());
+ // The inlinee specializes to the context from the JSFunction object.
+ // TODO(turbofan): We might want to load the context from the JSFunction at
+ // runtime in case we only know the SharedFunctionInfo once we have dynamic
+ // type feedback in the compiler.
AstGraphBuilder graph_builder(local_zone_, &info, &jsgraph);
- graph_builder.CreateGraph(false);
+ graph_builder.CreateGraph(true, false);
+ JSContextSpecializer context_specializer(&jsgraph);
+ GraphReducer graph_reducer(&graph, local_zone_);
+ graph_reducer.AddReducer(&context_specializer);
+ graph_reducer.ReduceGraph();
Inlinee::UnifyReturn(&jsgraph);
CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
@@ -382,13 +370,14 @@ Reduction JSInliner::TryInlineJSCall(Node* call_node,
for (Node* node : visitor.copies()) {
if (node && node->opcode() == IrOpcode::kFrameState) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
AddClosureToFrameState(node, function);
- NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
+ NodeProperties::ReplaceFrameStateInput(node, 0, outer_frame_state);
}
}
}
- return inlinee.InlineAtCall(jsgraph_, call_node);
+ return inlinee.InlineAtCall(jsgraph_, node);
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/js-inlining.h b/deps/v8/src/compiler/js-inlining.h
index 8a4e0c1780..0059b1c56c 100644
--- a/deps/v8/src/compiler/js-inlining.h
+++ b/deps/v8/src/compiler/js-inlining.h
@@ -16,13 +16,16 @@ class JSCallFunctionAccessor;
class JSInliner FINAL : public Reducer {
public:
- JSInliner(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph)
- : local_zone_(local_zone), info_(info), jsgraph_(jsgraph) {}
+ enum Mode { kBuiltinsInlining, kGeneralInlining };
- Reduction Reduce(Node* node) OVERRIDE;
+ JSInliner(Mode mode, Zone* local_zone, CompilationInfo* info,
+ JSGraph* jsgraph)
+ : mode_(mode), local_zone_(local_zone), info_(info), jsgraph_(jsgraph) {}
+
+ Reduction Reduce(Node* node) FINAL;
private:
- friend class InlinerVisitor;
+ Mode const mode_;
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
@@ -31,11 +34,10 @@ class JSInliner FINAL : public Reducer {
Handle<JSFunction> jsfunction,
Zone* temp_zone);
void AddClosureToFrameState(Node* frame_state, Handle<JSFunction> jsfunction);
- Reduction TryInlineJSCall(Node* node, Handle<JSFunction> jsfunction);
- static void UnifyReturn(Graph* graph);
};
-}
-}
-} // namespace v8::internal::compiler
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
#endif // V8_COMPILER_JS_INLINING_H_
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index a1e693585b..7253aab787 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -1,3 +1,4 @@
+
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,6 +7,7 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties.h"
namespace v8 {
@@ -22,18 +24,50 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
Runtime::FunctionForId(CallRuntimeParametersOf(node->op()).id());
if (f->intrinsic_type != Runtime::IntrinsicType::INLINE) return NoChange();
switch (f->function_id) {
- case Runtime::kInlineIsSmi:
- return ReduceInlineIsSmi(node);
- case Runtime::kInlineIsNonNegativeSmi:
- return ReduceInlineIsNonNegativeSmi(node);
+ case Runtime::kInlineConstructDouble:
+ return ReduceConstructDouble(node);
+ case Runtime::kInlineDeoptimizeNow:
+ return ReduceDeoptimizeNow(node);
+ case Runtime::kInlineDoubleHi:
+ return ReduceDoubleHi(node);
+ case Runtime::kInlineDoubleLo:
+ return ReduceDoubleLo(node);
+ case Runtime::kInlineHeapObjectGetMap:
+ return ReduceHeapObjectGetMap(node);
+ case Runtime::kInlineIncrementStatsCounter:
+ return ReduceIncrementStatsCounter(node);
case Runtime::kInlineIsArray:
- return ReduceInlineIsInstanceType(node, JS_ARRAY_TYPE);
+ return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
case Runtime::kInlineIsFunction:
- return ReduceInlineIsInstanceType(node, JS_FUNCTION_TYPE);
+ return ReduceIsInstanceType(node, JS_FUNCTION_TYPE);
+ case Runtime::kInlineIsNonNegativeSmi:
+ return ReduceIsNonNegativeSmi(node);
case Runtime::kInlineIsRegExp:
- return ReduceInlineIsInstanceType(node, JS_REGEXP_TYPE);
+ return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
+ case Runtime::kInlineIsSmi:
+ return ReduceIsSmi(node);
+ case Runtime::kInlineJSValueGetValue:
+ return ReduceJSValueGetValue(node);
+ case Runtime::kInlineMapGetInstanceType:
+ return ReduceMapGetInstanceType(node);
+ case Runtime::kInlineMathClz32:
+ return ReduceMathClz32(node);
+ case Runtime::kInlineMathFloor:
+ return ReduceMathFloor(node);
+ case Runtime::kInlineMathSqrt:
+ return ReduceMathSqrt(node);
+ case Runtime::kInlineOneByteSeqStringGetChar:
+ return ReduceSeqStringGetChar(node, String::ONE_BYTE_ENCODING);
+ case Runtime::kInlineOneByteSeqStringSetChar:
+ return ReduceSeqStringSetChar(node, String::ONE_BYTE_ENCODING);
+ case Runtime::kInlineStringGetLength:
+ return ReduceStringGetLength(node);
+ case Runtime::kInlineTwoByteSeqStringGetChar:
+ return ReduceSeqStringGetChar(node, String::TWO_BYTE_ENCODING);
+ case Runtime::kInlineTwoByteSeqStringSetChar:
+ return ReduceSeqStringSetChar(node, String::TWO_BYTE_ENCODING);
case Runtime::kInlineValueOf:
- return ReduceInlineValueOf(node);
+ return ReduceValueOf(node);
default:
break;
}
@@ -41,17 +75,95 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
}
-Reduction JSIntrinsicLowering::ReduceInlineIsSmi(Node* node) {
- return Change(node, simplified()->ObjectIsSmi());
+Reduction JSIntrinsicLowering::ReduceConstructDouble(Node* node) {
+ Node* high = NodeProperties::GetValueInput(node, 0);
+ Node* low = NodeProperties::GetValueInput(node, 1);
+ Node* value =
+ graph()->NewNode(machine()->Float64InsertHighWord32(),
+ graph()->NewNode(machine()->Float64InsertLowWord32(),
+ jsgraph()->Constant(0), low),
+ high);
+ NodeProperties::ReplaceWithValue(node, value);
+ return Replace(value);
}
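Float64InsertLowWord32 followed by Float64InsertHighWord32 assembles an IEEE-754 double from its two raw 32-bit halves. The same construction in self-contained C++:

#include <cstdint>
#include <cstring>

// What %_ConstructDouble(hi, lo) computes: hi and lo are the upper and
// lower words of the double's bit pattern; memcpy performs the bit cast.
double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}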
-Reduction JSIntrinsicLowering::ReduceInlineIsNonNegativeSmi(Node* node) {
- return Change(node, simplified()->ObjectIsNonNegativeSmi());
+Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
+ if (!FLAG_turbo_deoptimization) return NoChange();
+
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ DCHECK_EQ(frame_state->opcode(), IrOpcode::kFrameState);
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+  // We are making the continuation after the call dead. To model this, we
+  // generate an if (true) statement with the deopt in the true branch and
+  // the continuation in the false branch.
+ Node* branch =
+ graph()->NewNode(common()->Branch(), jsgraph()->TrueConstant(), control);
+
+ // False branch - the original continuation.
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ NodeProperties::ReplaceWithValue(node, jsgraph()->UndefinedConstant(), effect,
+ if_false);
+
+ // True branch: deopt.
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* deopt =
+ graph()->NewNode(common()->Deoptimize(), frame_state, effect, if_true);
+
+ // Connect the deopt to the merge exiting the graph.
+ NodeProperties::MergeControlToEnd(graph(), common(), deopt);
+
+ return Changed(deopt);
}
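The resulting shape keeps the original continuation structurally present but dynamically unreachable. A hedged control-flow analog, with Deoptimize() standing in for the deopt node:

[[noreturn]] void Deoptimize();  // stand-in for the common()->Deoptimize() node

int DeoptimizeNowAnalog() {
  if (true) {        // Branch on TrueConstant
    Deoptimize();    // IfTrue: control merges into the graph's End
  }
  return 0;          // IfFalse: the continuation, value set to undefined
}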
-Reduction JSIntrinsicLowering::ReduceInlineIsInstanceType(
+Reduction JSIntrinsicLowering::ReduceDoubleHi(Node* node) {
+ return Change(node, machine()->Float64ExtractHighWord32());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceDoubleLo(Node* node) {
+ return Change(node, machine()->Float64ExtractLowWord32());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceHeapObjectGetMap(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForMap()), value,
+ effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceIncrementStatsCounter(Node* node) {
+ if (!FLAG_native_code_counters) return ChangeToUndefined(node);
+ HeapObjectMatcher<String> m(NodeProperties::GetValueInput(node, 0));
+ if (!m.HasValue() || !m.Value().handle()->IsString()) {
+ return ChangeToUndefined(node);
+ }
+ SmartArrayPointer<char> name = m.Value().handle()->ToCString();
+ StatsCounter counter(jsgraph()->isolate(), name.get());
+ if (!counter.Enabled()) return ChangeToUndefined(node);
+
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ FieldAccess access = AccessBuilder::ForStatsCounter();
+ Node* cnt = jsgraph()->ExternalConstant(ExternalReference(&counter));
+ Node* load =
+ graph()->NewNode(simplified()->LoadField(access), cnt, effect, control);
+ Node* inc =
+ graph()->NewNode(machine()->Int32Add(), load, jsgraph()->OneConstant());
+ Node* store = graph()->NewNode(simplified()->StoreField(access), cnt, inc,
+ load, control);
+ return ChangeToUndefined(node, store);
+}
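The expansion is a plain read-modify-write against the counter cell referenced by the external constant; equivalently, as a hedged C++ analog:

#include <cstdint>

// LoadField / Int32Add / StoreField from above, flattened; the update is
// non-atomic, as in the generated code.
inline void BumpCounter(int32_t* counter_address) {
  int32_t value = *counter_address;  // LoadField
  *counter_address = value + 1;      // Int32Add, then StoreField
}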
+
+
+Reduction JSIntrinsicLowering::ReduceIsInstanceType(
Node* node, InstanceType instance_type) {
// if (%_IsSmi(value)) {
// return false;
@@ -91,7 +203,97 @@ Reduction JSIntrinsicLowering::ReduceInlineIsInstanceType(
}
-Reduction JSIntrinsicLowering::ReduceInlineValueOf(Node* node) {
+Reduction JSIntrinsicLowering::ReduceIsNonNegativeSmi(Node* node) {
+ return Change(node, simplified()->ObjectIsNonNegativeSmi());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceIsSmi(Node* node) {
+ return Change(node, simplified()->ObjectIsSmi());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceJSValueGetValue(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForValue()), value,
+ effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceMapGetInstanceType(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(node,
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ value, effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceMathClz32(Node* node) {
+ return Change(node, machine()->Word32Clz());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceMathFloor(Node* node) {
+ if (!machine()->HasFloat64RoundDown()) return NoChange();
+ return Change(node, machine()->Float64RoundDown());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceMathSqrt(Node* node) {
+ return Change(node, machine()->Float64Sqrt());
+}
+
+
+Reduction JSIntrinsicLowering::ReduceSeqStringGetChar(
+ Node* node, String::Encoding encoding) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ node->set_op(
+ simplified()->LoadElement(AccessBuilder::ForSeqStringChar(encoding)));
+ node->ReplaceInput(2, effect);
+ node->ReplaceInput(3, control);
+ node->TrimInputCount(4);
+ NodeProperties::ReplaceWithValue(node, node, node);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceSeqStringSetChar(
+ Node* node, String::Encoding encoding) {
+ // Note: The intrinsic has a strange argument order, so we need to reshuffle.
+ Node* index = NodeProperties::GetValueInput(node, 0);
+ Node* chr = NodeProperties::GetValueInput(node, 1);
+ Node* string = NodeProperties::GetValueInput(node, 2);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ node->set_op(
+ simplified()->StoreElement(AccessBuilder::ForSeqStringChar(encoding)));
+ node->ReplaceInput(0, string);
+ node->ReplaceInput(1, index);
+ node->ReplaceInput(2, chr);
+ node->ReplaceInput(3, effect);
+ node->ReplaceInput(4, control);
+ node->TrimInputCount(5);
+ NodeProperties::RemoveBounds(node);
+ NodeProperties::ReplaceWithValue(node, string, node);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceStringGetLength(Node* node) {
+ Node* value = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ return Change(node, simplified()->LoadField(AccessBuilder::ForStringLength()),
+ value, effect, control);
+}
+
+
+Reduction JSIntrinsicLowering::ReduceValueOf(Node* node) {
// if (%_IsSmi(value)) {
// return value;
// } else if (%_GetInstanceType(%_GetMap(value)) == JS_VALUE_TYPE) {
@@ -145,7 +347,6 @@ Reduction JSIntrinsicLowering::ReduceInlineValueOf(Node* node) {
Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
-
// Replace all effect uses of {node} with the {ephi0}.
Node* ephi0 = graph()->NewNode(ephi_op, etrue0, efalse0, merge0);
NodeProperties::ReplaceWithValue(node, node, ephi0);
@@ -156,7 +357,7 @@ Reduction JSIntrinsicLowering::ReduceInlineValueOf(Node* node) {
Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op) {
- // Remove the effects from the node and update its effect usages.
+ // Replace all effect uses of {node} with the effect dependency.
NodeProperties::ReplaceWithValue(node, node);
// Remove the inputs corresponding to context, effect and control.
NodeProperties::RemoveNonValueInputs(node);
@@ -173,6 +374,14 @@ Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
node->ReplaceInput(1, b);
node->ReplaceInput(2, c);
node->TrimInputCount(3);
+ NodeProperties::ReplaceWithValue(node, node, node);
+ return Changed(node);
+}
+
+
+Reduction JSIntrinsicLowering::ChangeToUndefined(Node* node, Node* effect) {
+ NodeProperties::ReplaceWithValue(node, jsgraph()->UndefinedConstant(),
+ effect);
return Changed(node);
}
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index bc188caa06..35fb66147b 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -27,13 +27,28 @@ class JSIntrinsicLowering FINAL : public Reducer {
Reduction Reduce(Node* node) FINAL;
private:
- Reduction ReduceInlineIsSmi(Node* node);
- Reduction ReduceInlineIsNonNegativeSmi(Node* node);
- Reduction ReduceInlineIsInstanceType(Node* node, InstanceType instance_type);
- Reduction ReduceInlineValueOf(Node* node);
+ Reduction ReduceConstructDouble(Node* node);
+ Reduction ReduceDeoptimizeNow(Node* node);
+ Reduction ReduceDoubleHi(Node* node);
+ Reduction ReduceDoubleLo(Node* node);
+ Reduction ReduceHeapObjectGetMap(Node* node);
+ Reduction ReduceIncrementStatsCounter(Node* node);
+ Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
+ Reduction ReduceIsNonNegativeSmi(Node* node);
+ Reduction ReduceIsSmi(Node* node);
+ Reduction ReduceJSValueGetValue(Node* node);
+ Reduction ReduceMapGetInstanceType(Node* node);
+ Reduction ReduceMathClz32(Node* node);
+ Reduction ReduceMathFloor(Node* node);
+ Reduction ReduceMathSqrt(Node* node);
+ Reduction ReduceSeqStringGetChar(Node* node, String::Encoding encoding);
+ Reduction ReduceSeqStringSetChar(Node* node, String::Encoding encoding);
+ Reduction ReduceStringGetLength(Node* node);
+ Reduction ReduceValueOf(Node* node);
Reduction Change(Node* node, const Operator* op);
Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
+ Reduction ChangeToUndefined(Node* node, Node* effect = nullptr);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 72c39697b4..327da55bdd 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -239,7 +239,7 @@ const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
V(HasProperty, Operator::kNoProperties, 2, 1) \
V(TypeOf, Operator::kPure, 1, 1) \
V(InstanceOf, Operator::kNoProperties, 2, 1) \
- V(Debugger, Operator::kNoProperties, 0, 0) \
+ V(StackCheck, Operator::kNoProperties, 0, 0) \
V(CreateFunctionContext, Operator::kNoProperties, 1, 1) \
V(CreateWithContext, Operator::kNoProperties, 2, 1) \
V(CreateBlockContext, Operator::kNoProperties, 2, 1) \
@@ -253,8 +253,9 @@ struct JSOperatorGlobalCache FINAL {
Name##Operator() \
: Operator(IrOpcode::kJS##Name, properties, "JS" #Name, \
value_input_count, Operator::ZeroIfPure(properties), \
- Operator::ZeroIfPure(properties), value_output_count, \
- Operator::ZeroIfPure(properties), 0) {} \
+ Operator::ZeroIfEliminatable(properties), \
+ value_output_count, Operator::ZeroIfPure(properties), \
+ Operator::ZeroIfNoThrow(properties)) {} \
}; \
Name##Operator k##Name##Operator;
CACHED_OP_LIST(CACHED)
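Cached operators now take their effect-output count from kEliminatable and their control-output count from kNoThrow, in line with the 0 to 2 control-output bumps in the hunks below (throwing JS operators gain success and exception continuations). The helpers' presumed shape, inferred from those counts rather than copied from operator.h:

// Hedged reconstruction; consult src/compiler/operator.h for the real bodies.
static int ZeroIfPure(Operator::Properties properties) {
  return (properties & Operator::kPure) == Operator::kPure ? 0 : 1;
}
static int ZeroIfEliminatable(Operator::Properties properties) {
  return (properties & Operator::kEliminatable) == Operator::kEliminatable ? 0 : 1;
}
static int ZeroIfNoThrow(Operator::Properties properties) {
  return (properties & Operator::kNoThrow) == Operator::kNoThrow ? 0 : 2;
}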
@@ -265,7 +266,7 @@ struct JSOperatorGlobalCache FINAL {
StorePropertyOperator()
: Operator1<LanguageMode>(IrOpcode::kJSStoreProperty,
Operator::kNoProperties, "JSStoreProperty", 3,
- 1, 1, 0, 1, 0, kLanguageMode) {}
+ 1, 1, 0, 1, 2, kLanguageMode) {}
};
StorePropertyOperator<SLOPPY> kStorePropertySloppyOperator;
StorePropertyOperator<STRICT> kStorePropertyStrictOperator;
@@ -294,7 +295,7 @@ const Operator* JSOperatorBuilder::CallFunction(size_t arity,
return new (zone()) Operator1<CallFunctionParameters>( // --
IrOpcode::kJSCallFunction, Operator::kNoProperties, // opcode
"JSCallFunction", // name
- parameters.arity(), 1, 1, 1, 1, 0, // inputs/outputs
+ parameters.arity(), 1, 1, 1, 1, 2, // inputs/outputs
parameters); // parameter
}
@@ -307,7 +308,7 @@ const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id,
return new (zone()) Operator1<CallRuntimeParameters>( // --
IrOpcode::kJSCallRuntime, Operator::kNoProperties, // opcode
"JSCallRuntime", // name
- parameters.arity(), 1, 1, f->result_size, 1, 0, // inputs/outputs
+ parameters.arity(), 1, 1, f->result_size, 1, 2, // inputs/outputs
parameters); // parameter
}
@@ -316,7 +317,7 @@ const Operator* JSOperatorBuilder::CallConstruct(int arguments) {
return new (zone()) Operator1<int>( // --
IrOpcode::kJSCallConstruct, Operator::kNoProperties, // opcode
"JSCallConstruct", // name
- arguments, 1, 1, 1, 1, 0, // counts
+ arguments, 1, 1, 1, 1, 2, // counts
arguments); // parameter
}
@@ -328,7 +329,7 @@ const Operator* JSOperatorBuilder::LoadNamed(const Unique<Name>& name,
return new (zone()) Operator1<LoadNamedParameters>( // --
IrOpcode::kJSLoadNamed, Operator::kNoProperties, // opcode
"JSLoadNamed", // name
- 1, 1, 1, 1, 1, 0, // counts
+ 1, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
@@ -339,7 +340,7 @@ const Operator* JSOperatorBuilder::LoadProperty(
return new (zone()) Operator1<LoadPropertyParameters>( // --
IrOpcode::kJSLoadProperty, Operator::kNoProperties, // opcode
"JSLoadProperty", // name
- 2, 1, 1, 1, 1, 0, // counts
+ 2, 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
@@ -361,7 +362,7 @@ const Operator* JSOperatorBuilder::StoreNamed(LanguageMode language_mode,
return new (zone()) Operator1<StoreNamedParameters>( // --
IrOpcode::kJSStoreNamed, Operator::kNoProperties, // opcode
"JSStoreNamed", // name
- 2, 1, 1, 0, 1, 0, // counts
+ 2, 1, 1, 0, 1, 2, // counts
parameters); // parameter
}
@@ -370,7 +371,7 @@ const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
return new (zone()) Operator1<LanguageMode>( // --
IrOpcode::kJSDeleteProperty, Operator::kNoProperties, // opcode
"JSDeleteProperty", // name
- 2, 1, 1, 1, 1, 0, // counts
+ 2, 1, 1, 1, 1, 2, // counts
language_mode); // parameter
}
@@ -378,21 +379,23 @@ const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
const Operator* JSOperatorBuilder::LoadContext(size_t depth, size_t index,
bool immutable) {
ContextAccess access(depth, index, immutable);
- return new (zone()) Operator1<ContextAccess>( // --
- IrOpcode::kJSLoadContext, Operator::kNoWrite, // opcode
- "JSLoadContext", // name
- 1, 1, 0, 1, 1, 0, // counts
- access); // parameter
+ return new (zone()) Operator1<ContextAccess>( // --
+ IrOpcode::kJSLoadContext, // opcode
+ Operator::kNoWrite | Operator::kNoThrow, // flags
+ "JSLoadContext", // name
+ 1, 1, 0, 1, 1, 0, // counts
+ access); // parameter
}
const Operator* JSOperatorBuilder::StoreContext(size_t depth, size_t index) {
ContextAccess access(depth, index, false);
- return new (zone()) Operator1<ContextAccess>( // --
- IrOpcode::kJSStoreContext, Operator::kNoRead, // opcode
- "JSStoreContext", // name
- 2, 1, 1, 0, 1, 0, // counts
- access); // parameter
+ return new (zone()) Operator1<ContextAccess>( // --
+ IrOpcode::kJSStoreContext, // opcode
+ Operator::kNoRead | Operator::kNoThrow, // flags
+ "JSStoreContext", // name
+ 2, 1, 1, 0, 1, 0, // counts
+ access); // parameter
}
@@ -401,7 +404,7 @@ const Operator* JSOperatorBuilder::CreateCatchContext(
return new (zone()) Operator1<Unique<String>>( // --
IrOpcode::kJSCreateCatchContext, Operator::kNoProperties, // opcode
"JSCreateCatchContext", // name
- 2, 1, 1, 1, 1, 0, // counts
+ 2, 1, 1, 1, 1, 2, // counts
name); // parameter
}
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index e7fc04c1e1..32fe356a8a 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -252,7 +252,8 @@ class JSOperatorBuilder FINAL : public ZoneObject {
const Operator* TypeOf();
const Operator* InstanceOf();
- const Operator* Debugger();
+
+ const Operator* StackCheck();
// TODO(titzer): nail down the static parts of each of these context flavors.
const Operator* CreateFunctionContext();
diff --git a/deps/v8/src/compiler/js-type-feedback.cc b/deps/v8/src/compiler/js-type-feedback.cc
new file mode 100644
index 0000000000..bdd61df87e
--- /dev/null
+++ b/deps/v8/src/compiler/js-type-feedback.cc
@@ -0,0 +1,256 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-type-feedback.h"
+
+#include "src/property-details.h"
+
+#include "src/accessors.h"
+#include "src/ast.h"
+#include "src/type-info.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum LoadOrStore { LOAD, STORE };
+
+JSTypeFeedbackTable::JSTypeFeedbackTable(Zone* zone)
+ : map_(TypeFeedbackIdMap::key_compare(),
+ TypeFeedbackIdMap::allocator_type(zone)) {}
+
+
+void JSTypeFeedbackTable::Record(Node* node, TypeFeedbackId id) {
+ map_.insert(std::make_pair(node->id(), id));
+}
+
+
+Reduction JSTypeFeedbackSpecializer::Reduce(Node* node) {
+ // TODO(turbofan): type feedback currently requires deoptimization.
+ if (!FLAG_turbo_deoptimization) return NoChange();
+ switch (node->opcode()) {
+ case IrOpcode::kJSLoadProperty:
+ return ReduceJSLoadProperty(node);
+ case IrOpcode::kJSLoadNamed:
+ return ReduceJSLoadNamed(node);
+ case IrOpcode::kJSStoreNamed:
+ return ReduceJSStoreNamed(node);
+ case IrOpcode::kJSStoreProperty:
+ return ReduceJSStoreProperty(node);
+ default:
+ break;
+ }
+ return NoChange();
+}
+
+
+static bool GetInObjectFieldAccess(LoadOrStore mode, Handle<Map> map,
+ Handle<Name> name, FieldAccess* access) {
+ access->base_is_tagged = kTaggedBase;
+ access->offset = -1;
+ access->name = name;
+ access->type = Type::Any();
+ access->machine_type = kMachAnyTagged;
+
+ // Check for properties that have accessors but are JSObject fields.
+ if (Accessors::IsJSObjectFieldAccessor(map, name, &access->offset)) {
+ // TODO(turbofan): fill in types for special JSObject field accesses.
+ return true;
+ }
+
+ // Check if the map is a dictionary.
+ if (map->is_dictionary_map()) return false;
+
+ // Search the descriptor array.
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int number = descriptors->SearchWithCache(*name, *map);
+ if (number == DescriptorArray::kNotFound) return false;
+ PropertyDetails property_details = descriptors->GetDetails(number);
+
+ bool is_smi = property_details.representation().IsSmi();
+ bool is_double = property_details.representation().IsDouble();
+
+ if (property_details.type() != DATA) {
+ // TODO(turbofan): constant loads and stores.
+ return false;
+ }
+
+ if (mode == STORE) {
+ if (property_details.IsReadOnly()) return false;
+ if (is_smi) {
+ // TODO(turbofan): SMI stores.
+ return false;
+ }
+ if (is_double) {
+ // TODO(turbofan): double stores.
+ return false;
+ }
+ } else {
+ // Check property details for loads.
+ if (is_smi) {
+ access->type = Type::SignedSmall();
+ access->machine_type = static_cast<MachineType>(kTypeInt32 | kRepTagged);
+ }
+ if (is_double) {
+ access->type = Type::Number();
+ access->machine_type = kMachFloat64;
+ }
+ }
+
+ int index = map->instance_descriptors()->GetFieldIndex(number);
+ FieldIndex field_index = FieldIndex::ForPropertyIndex(*map, index, is_double);
+
+ if (field_index.is_inobject()) {
+ access->offset = field_index.offset();
+ return true;
+ }
+
+ // TODO(turbofan): handle out of object properties.
+ return false;
+}
+
+
+Reduction JSTypeFeedbackSpecializer::ReduceJSLoadNamed(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadNamed);
+ TypeFeedbackId id = js_type_feedback_->find(node);
+ if (id.IsNone() || oracle()->LoadIsUninitialized(id)) return NoChange();
+
+ const LoadNamedParameters& p = LoadNamedParametersOf(node->op());
+ SmallMapList maps;
+ Handle<Name> name = p.name().handle();
+ Node* receiver = node->InputAt(0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ GatherReceiverTypes(receiver, effect, id, name, &maps);
+
+ if (maps.length() != 1) return NoChange(); // TODO(turbofan): polymorphism
+
+ Handle<Map> map = maps.first();
+ FieldAccess field_access;
+ if (!GetInObjectFieldAccess(LOAD, map, name, &field_access)) {
+ return NoChange();
+ }
+
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* check_success;
+ Node* check_failed;
+ BuildMapCheck(receiver, map, true, effect, control, &check_success,
+ &check_failed);
+
+ // Build the actual load.
+ Node* load = graph()->NewNode(simplified()->LoadField(field_access), receiver,
+ effect, check_success);
+
+ // TODO(turbofan): handle slow case instead of deoptimizing.
+ // TODO(titzer): frame state should be from before the load.
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state, effect,
+ check_failed);
+ NodeProperties::MergeControlToEnd(graph(), common(), deopt);
+ NodeProperties::ReplaceWithValue(node, load, load, check_success);
+ return Replace(load);
+}
+
+
+Reduction JSTypeFeedbackSpecializer::ReduceJSLoadProperty(Node* node) {
+ return NoChange();
+}
+
+
+Reduction JSTypeFeedbackSpecializer::ReduceJSStoreNamed(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kJSStoreNamed);
+ TypeFeedbackId id = js_type_feedback_->find(node);
+ if (id.IsNone() || oracle()->StoreIsUninitialized(id)) return NoChange();
+
+ const StoreNamedParameters& p = StoreNamedParametersOf(node->op());
+ SmallMapList maps;
+ Handle<Name> name = p.name().handle();
+ Node* receiver = node->InputAt(0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ GatherReceiverTypes(receiver, effect, id, name, &maps);
+
+ if (maps.length() != 1) return NoChange(); // TODO(turbofan): polymorphism
+
+ Handle<Map> map = maps.first();
+ FieldAccess field_access;
+ if (!GetInObjectFieldAccess(STORE, map, name, &field_access)) {
+ return NoChange();
+ }
+
+ Node* control = NodeProperties::GetControlInput(node);
+ Node* check_success;
+ Node* check_failed;
+ BuildMapCheck(receiver, map, true, effect, control, &check_success,
+ &check_failed);
+
+  // Build the actual store.
+ Node* value = node->InputAt(1);
+ Node* store = graph()->NewNode(simplified()->StoreField(field_access),
+ receiver, value, effect, check_success);
+
+ // TODO(turbofan): handle slow case instead of deoptimizing.
+ // TODO(titzer): frame state should be from before the store.
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, 0);
+ Node* deopt = graph()->NewNode(common()->Deoptimize(), frame_state, effect,
+ check_failed);
+ NodeProperties::MergeControlToEnd(graph(), common(), deopt);
+ NodeProperties::ReplaceWithValue(node, store, store, check_success);
+ return Replace(store);
+}
+
+
+Reduction JSTypeFeedbackSpecializer::ReduceJSStoreProperty(Node* node) {
+ return NoChange();
+}
+
+
+void JSTypeFeedbackSpecializer::BuildMapCheck(Node* receiver, Handle<Map> map,
+ bool smi_check, Node* effect,
+ Node* control, Node** success,
+ Node** fail) {
+ Node* if_smi = nullptr;
+ if (smi_check) {
+ Node* branch_smi = graph()->NewNode(
+ common()->Branch(BranchHint::kFalse),
+ graph()->NewNode(simplified()->ObjectIsSmi(), receiver), control);
+ if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
+ control = graph()->NewNode(common()->IfFalse(), branch_smi);
+ }
+
+ FieldAccess map_access = AccessBuilder::ForMap();
+ Node* receiver_map = graph()->NewNode(simplified()->LoadField(map_access),
+ receiver, effect, control);
+ Node* map_const = jsgraph_->Constant(map);
+ Node* cmp = graph()->NewNode(simplified()->ReferenceEqual(Type::Internal()),
+ receiver_map, map_const);
+ Node* branch =
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), cmp, control);
+ *success = graph()->NewNode(common()->IfTrue(), branch);
+ *fail = graph()->NewNode(common()->IfFalse(), branch);
+
+ if (if_smi) {
+ *fail = graph()->NewNode(common()->Merge(2), *fail, if_smi);
+ }
+}
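Flattened into straight-line form, the diamond emitted by BuildMapCheck reads as follows (toy parameters; receiver_is_smi and the map pointers stand in for the ObjectIsSmi node and the LoadField(ForMap()) result):

// Hedged analog: an optional Smi bailout feeding a map identity compare;
// both failure edges merge into *fail in the real graph.
bool MapCheckAnalog(bool receiver_is_smi, const void* receiver_map,
                    const void* expected_map, bool smi_check) {
  if (smi_check && receiver_is_smi) return false;  // if_smi edge
  return receiver_map == expected_map;  // ReferenceEqual, BranchHint::kTrue
}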
+
+
+void JSTypeFeedbackSpecializer::GatherReceiverTypes(Node* receiver,
+ Node* effect,
+ TypeFeedbackId id,
+ Handle<Name> name,
+ SmallMapList* maps) {
+ // TODO(turbofan): filter maps by initial receiver map if known
+ // TODO(turbofan): filter maps by native context (if specializing)
+ // TODO(turbofan): filter maps by effect chain
+ oracle()->PropertyReceiverTypes(id, name, maps);
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/js-type-feedback.h b/deps/v8/src/compiler/js-type-feedback.h
new file mode 100644
index 0000000000..033da657ff
--- /dev/null
+++ b/deps/v8/src/compiler/js-type-feedback.h
@@ -0,0 +1,91 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_TYPE_FEEDBACK_H_
+#define V8_COMPILER_JS_TYPE_FEEDBACK_H_
+
+#include "src/utils.h"
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+class TypeFeedbackOracle;
+class SmallMapList;
+
+namespace compiler {
+
+// Stores type feedback information for nodes in the graph in a separate
+// data structure.
+class JSTypeFeedbackTable : public ZoneObject {
+ public:
+ explicit JSTypeFeedbackTable(Zone* zone);
+
+ // TODO(titzer): support recording the feedback vector slot.
+
+ void Record(Node* node, TypeFeedbackId id);
+
+ private:
+ friend class JSTypeFeedbackSpecializer;
+ typedef std::map<NodeId, TypeFeedbackId, std::less<NodeId>,
+ zone_allocator<TypeFeedbackId> > TypeFeedbackIdMap;
+
+ TypeFeedbackIdMap map_;
+
+ TypeFeedbackId find(Node* node) {
+ TypeFeedbackIdMap::const_iterator it = map_.find(node->id());
+ return it == map_.end() ? TypeFeedbackId::None() : it->second;
+ }
+};
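A hedged usage sketch; the recording side lives in the graph builder and is not part of this patch, so zone, load_node, and expr are assumptions:

// Hypothetical recording site while building a named load:
//   JSTypeFeedbackTable* table = new (zone) JSTypeFeedbackTable(zone);
//   table->Record(load_node, expr->PropertyFeedbackId());
// The befriended specializer later maps load_node->id() back to a
// TypeFeedbackId via find() and consults the TypeFeedbackOracle.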
+
+
+// Specializes a graph to the type feedback recorded in the
+// {js_type_feedback} provided to the constructor.
+class JSTypeFeedbackSpecializer : public Reducer {
+ public:
+ JSTypeFeedbackSpecializer(JSGraph* jsgraph,
+ JSTypeFeedbackTable* js_type_feedback,
+ TypeFeedbackOracle* oracle)
+ : jsgraph_(jsgraph),
+ simplified_(jsgraph->graph()->zone()),
+ js_type_feedback_(js_type_feedback),
+ oracle_(oracle) {
+ CHECK(js_type_feedback);
+ }
+
+ Reduction Reduce(Node* node) OVERRIDE;
+
+ // Visible for unit testing.
+ Reduction ReduceJSLoadNamed(Node* node);
+ Reduction ReduceJSLoadProperty(Node* node);
+ Reduction ReduceJSStoreNamed(Node* node);
+ Reduction ReduceJSStoreProperty(Node* node);
+
+ private:
+ JSGraph* jsgraph_;
+ SimplifiedOperatorBuilder simplified_;
+ JSTypeFeedbackTable* js_type_feedback_;
+ TypeFeedbackOracle* oracle_;
+
+ TypeFeedbackOracle* oracle() { return oracle_; }
+ Graph* graph() { return jsgraph_->graph(); }
+ CommonOperatorBuilder* common() { return jsgraph_->common(); }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ void BuildMapCheck(Node* receiver, Handle<Map> map, bool smi_check,
+ Node* effect, Node* control, Node** success, Node** fail);
+
+ void GatherReceiverTypes(Node* receiver, Node* effect, TypeFeedbackId id,
+ Handle<Name> property, SmallMapList* maps);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_JS_TYPE_FEEDBACK_H_
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index bbe46fb029..244cfe2dba 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -19,36 +19,32 @@ namespace compiler {
// - relax effects from generic but not-side-effecting operations
-// Relax the effects of {node} by immediately replacing effect uses of {node}
-// with the effect input to {node}.
+// Relax the effects of {node} by immediately replacing effect and control uses
+// of {node} with the effect and control input to {node}.
// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
// TODO(titzer): move into a GraphEditor?
-static void RelaxEffects(Node* node) {
+static void RelaxEffectsAndControls(Node* node) {
NodeProperties::ReplaceWithValue(node, node, NULL);
}
+// Relax the control uses of {node} by immediately replacing them with the
+// control input to {node}.
+// TODO(titzer): move into a GraphEditor?
+static void RelaxControls(Node* node) {
+ NodeProperties::ReplaceWithValue(node, node, node);
+}
+
+
JSTypedLowering::JSTypedLowering(JSGraph* jsgraph, Zone* zone)
: jsgraph_(jsgraph), simplified_(graph()->zone()), conversions_(zone) {
- zero_range_ = Type::Range(0.0, 1.0, graph()->zone());
+ zero_range_ = Type::Range(0.0, 0.0, graph()->zone());
one_range_ = Type::Range(1.0, 1.0, graph()->zone());
zero_thirtyone_range_ = Type::Range(0.0, 31.0, graph()->zone());
- // TODO(jarin): Can we have a correctification of the stupid type system?
- // These stupid work-arounds are just stupid!
- shifted_int32_ranges_[0] = Type::Signed32();
- if (SmiValuesAre31Bits()) {
- shifted_int32_ranges_[1] = Type::SignedSmall();
- for (size_t k = 2; k < arraysize(shifted_int32_ranges_); ++k) {
- double min = kMinInt / (1 << k);
- double max = kMaxInt / (1 << k);
- shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
- }
- } else {
- for (size_t k = 1; k < arraysize(shifted_int32_ranges_); ++k) {
- double min = kMinInt / (1 << k);
- double max = kMaxInt / (1 << k);
- shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
- }
+ for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
+ double min = kMinInt / (1 << k);
+ double max = kMaxInt / (1 << k);
+ shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
}
}
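With 32-bit kMinInt/kMaxInt, the unified loop yields the following bounds (k = 0 reproduces the full Signed32 range; the array is assumed to cover shifts 0 through 3). Checked with plain truncating integer division:

#include <climits>

static_assert(INT_MIN / (1 << 1) == -1073741824, "k=1 min");
static_assert(INT_MAX / (1 << 1) == 1073741823, "k=1 max");
static_assert(INT_MIN / (1 << 2) == -536870912, "k=2 min");
static_assert(INT_MAX / (1 << 2) == 536870911, "k=2 max");
static_assert(INT_MIN / (1 << 3) == -268435456, "k=3 min");
static_assert(INT_MAX / (1 << 3) == 268435455, "k=3 max");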
@@ -66,14 +62,36 @@ Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
class JSBinopReduction FINAL {
public:
JSBinopReduction(JSTypedLowering* lowering, Node* node)
- : lowering_(lowering),
- node_(node),
- left_type_(NodeProperties::GetBounds(node->InputAt(0)).upper),
- right_type_(NodeProperties::GetBounds(node->InputAt(1)).upper) {}
+ : lowering_(lowering), node_(node) {}
- void ConvertInputsToNumber() {
- node_->ReplaceInput(0, ConvertToNumber(left()));
- node_->ReplaceInput(1, ConvertToNumber(right()));
+ void ConvertPrimitiveInputsToNumber() {
+ node_->ReplaceInput(0, ConvertPrimitiveToNumber(left()));
+ node_->ReplaceInput(1, ConvertPrimitiveToNumber(right()));
+ }
+
+ void ConvertInputsToNumber(Node* frame_state) {
+ // To convert the inputs to numbers, we have to provide frame states
+ // for lazy bailouts in the ToNumber conversions.
+ // We use a little hack here: we take the frame state before the binary
+ // operation and use it to construct the frame states for the conversion
+    // so that after deoptimization the binary operation IC receives
+    // already-converted values from full code. This way none of the
+    // conversions' side effects are re-executed.
+
+ Node* left_input =
+ left_type()->Is(Type::PlainPrimitive())
+ ? ConvertPrimitiveToNumber(left())
+ : ConvertToNumber(left(),
+ CreateFrameStateForLeftInput(frame_state));
+
+ Node* right_input =
+ right_type()->Is(Type::PlainPrimitive())
+ ? ConvertPrimitiveToNumber(right())
+ : ConvertToNumber(right(), CreateFrameStateForRightInput(
+ frame_state, left_input));
+
+ node_->ReplaceInput(0, left_input);
+ node_->ReplaceInput(1, right_input);
}
void ConvertInputsToUI32(Signedness left_signedness,
@@ -89,8 +107,9 @@ class JSBinopReduction FINAL {
// Convert inputs for bitwise shift operation (ES5 spec 11.7).
void ConvertInputsForShift(Signedness left_signedness) {
- node_->ReplaceInput(0, ConvertToUI32(left(), left_signedness));
- Node* rnum = ConvertToUI32(right(), kUnsigned);
+ node_->ReplaceInput(
+ 0, ConvertToUI32(ConvertPrimitiveToNumber(left()), left_signedness));
+ Node* rnum = ConvertToUI32(ConvertPrimitiveToNumber(right()), kUnsigned);
Type* rnum_type = NodeProperties::GetBounds(rnum).upper;
if (!rnum_type->Is(lowering_->zero_thirtyone_range_)) {
rnum = graph()->NewNode(machine()->Word32And(), rnum,
@@ -104,7 +123,6 @@ class JSBinopReduction FINAL {
Node* r = right();
node_->ReplaceInput(0, r);
node_->ReplaceInput(1, l);
- std::swap(left_type_, right_type_);
}
// Remove all effect and control inputs and outputs to this node and change
@@ -116,9 +134,9 @@ class JSBinopReduction FINAL {
DCHECK_EQ(0, op->ControlInputCount());
DCHECK_EQ(2, op->ValueInputCount());
- // Remove the effects from the node, if any, and update its effect usages.
+ // Remove the effects from the node, and update its effect/control usages.
if (node_->op()->EffectInputCount() > 0) {
- RelaxEffects(node_);
+ RelaxEffectsAndControls(node_);
}
// Remove the inputs corresponding to context, effect, and control.
NodeProperties::RemoveNonValueInputs(node_);
@@ -145,18 +163,18 @@ class JSBinopReduction FINAL {
return ChangeToPureOperator(op, false, type);
}
- bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }
+ bool OneInputIs(Type* t) { return left_type()->Is(t) || right_type()->Is(t); }
bool BothInputsAre(Type* t) {
- return left_type_->Is(t) && right_type_->Is(t);
+ return left_type()->Is(t) && right_type()->Is(t);
}
bool OneInputCannotBe(Type* t) {
- return !left_type_->Maybe(t) || !right_type_->Maybe(t);
+ return !left_type()->Maybe(t) || !right_type()->Maybe(t);
}
bool NeitherInputCanBe(Type* t) {
- return !left_type_->Maybe(t) && !right_type_->Maybe(t);
+ return !left_type()->Maybe(t) && !right_type()->Maybe(t);
}
Node* effect() { return NodeProperties::GetEffectInput(node_); }
@@ -164,8 +182,12 @@ class JSBinopReduction FINAL {
Node* context() { return NodeProperties::GetContextInput(node_); }
Node* left() { return NodeProperties::GetValueInput(node_, 0); }
Node* right() { return NodeProperties::GetValueInput(node_, 1); }
- Type* left_type() { return left_type_; }
- Type* right_type() { return right_type_; }
+ Type* left_type() {
+ return NodeProperties::GetBounds(node_->InputAt(0)).upper;
+ }
+ Type* right_type() {
+ return NodeProperties::GetBounds(node_->InputAt(1)).upper;
+ }
SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
Graph* graph() const { return lowering_->graph(); }
@@ -177,8 +199,6 @@ class JSBinopReduction FINAL {
private:
JSTypedLowering* lowering_; // The containing lowering instance.
Node* node_; // The original node.
- Type* left_type_; // Cache of the left input's type.
- Type* right_type_; // Cache of the right input's type.
Node* ConvertToString(Node* node) {
// Avoid introducing too many eager ToString() operations.
@@ -190,27 +210,93 @@ class JSBinopReduction FINAL {
return n;
}
- Node* ConvertToNumber(Node* node) {
+ Node* CreateFrameStateForLeftInput(Node* frame_state) {
+ if (!FLAG_turbo_deoptimization) return nullptr;
+
+ FrameStateCallInfo state_info =
+ OpParameter<FrameStateCallInfo>(frame_state);
+ // If the frame state is already the right one, just return it.
+ if (state_info.state_combine().kind() == OutputFrameStateCombine::kPokeAt &&
+ state_info.state_combine().GetOffsetToPokeAt() == 1) {
+ return frame_state;
+ }
+
+ // Here, we smash the result of the conversion into the slot just below
+ // the stack top. This is the slot that full code uses to store the
+ // left operand.
+ const Operator* op = jsgraph()->common()->FrameState(
+ state_info.type(), state_info.bailout_id(),
+ OutputFrameStateCombine::PokeAt(1));
+
+ return graph()->NewNode(op, frame_state->InputAt(0),
+ frame_state->InputAt(1), frame_state->InputAt(2),
+ frame_state->InputAt(3), frame_state->InputAt(4));
+ }
+
+ Node* CreateFrameStateForRightInput(Node* frame_state, Node* converted_left) {
+ if (!FLAG_turbo_deoptimization) return nullptr;
+
+ FrameStateCallInfo state_info =
+ OpParameter<FrameStateCallInfo>(frame_state);
+
+ if (state_info.bailout_id() == BailoutId::None()) {
+ // Dummy frame state => just leave it as is.
+ return frame_state;
+ }
+
+ // Create a frame state that stores the result of the operation to the
+ // top of the stack (i.e., the slot used for the right operand).
+ const Operator* op = jsgraph()->common()->FrameState(
+ state_info.type(), state_info.bailout_id(),
+ OutputFrameStateCombine::PokeAt(0));
+
+ // Change the left operand {converted_left} on the expression stack.
+ Node* stack = frame_state->InputAt(2);
+ DCHECK_EQ(stack->opcode(), IrOpcode::kStateValues);
+ DCHECK_GE(stack->InputCount(), 2);
+
+ // TODO(jarin) Allocate in a local zone or a reusable buffer.
+ NodeVector new_values(stack->InputCount(), zone());
+ for (int i = 0; i < stack->InputCount(); i++) {
+ if (i == stack->InputCount() - 2) {
+ new_values[i] = converted_left;
+ } else {
+ new_values[i] = stack->InputAt(i);
+ }
+ }
+ Node* new_stack =
+ graph()->NewNode(stack->op(), stack->InputCount(), &new_values.front());
+
+ return graph()->NewNode(op, frame_state->InputAt(0),
+ frame_state->InputAt(1), new_stack,
+ frame_state->InputAt(3), frame_state->InputAt(4));
+ }
+
+ Node* ConvertPrimitiveToNumber(Node* node) {
+ return lowering_->ConvertPrimitiveToNumber(node);
+ }
+
+ Node* ConvertToNumber(Node* node, Node* frame_state) {
if (NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive())) {
- return lowering_->ConvertToNumber(node);
+ return ConvertPrimitiveToNumber(node);
+ } else if (!FLAG_turbo_deoptimization) {
+      // We cannot use ConvertPrimitiveToNumber here because we need the
+      // context for converting general values.
+ Node* const n = graph()->NewNode(javascript()->ToNumber(), node,
+ context(), effect(), control());
+ update_effect(n);
+ return n;
+ } else {
+ Node* const n =
+ graph()->NewNode(javascript()->ToNumber(), node, context(),
+ frame_state, effect(), control());
+ update_effect(n);
+ return n;
}
- // TODO(jarin) This ToNumber conversion can deoptimize, but we do not really
- // have a frame state to deoptimize to. Either we provide such a frame state
- // or we exclude the values that could lead to deoptimization (e.g., by
- // triggering eager deopt if the value is not plain).
- Node* const n = FLAG_turbo_deoptimization
- ? graph()->NewNode(
- javascript()->ToNumber(), node, context(),
- jsgraph()->EmptyFrameState(), effect(), control())
- : graph()->NewNode(javascript()->ToNumber(), node,
- context(), effect(), control());
- update_effect(n);
- return n;
}
Node* ConvertToUI32(Node* node, Signedness signedness) {
// Avoid introducing too many eager NumberToXXnt32() operations.
- node = ConvertToNumber(node);
Type* type = NodeProperties::GetBounds(node).upper;
if (signedness == kSigned) {
if (!type->Is(Type::Signed32())) {
@@ -237,22 +323,15 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
// JSAdd(x:number, y:number) => NumberAdd(x, y)
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
- if (r.BothInputsAre(Type::Primitive()) &&
- r.NeitherInputCanBe(Type::StringOrReceiver())) {
+ if (r.NeitherInputCanBe(Type::StringOrReceiver())) {
// JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
- r.ConvertInputsToNumber();
+ Node* frame_state = FLAG_turbo_deoptimization
+ ? NodeProperties::GetFrameStateInput(node, 1)
+ : nullptr;
+ r.ConvertInputsToNumber(frame_state);
return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
}
#if 0
- // TODO(turbofan): General ToNumber disabled for now because:
- // a) The inserted ToNumber operation screws up observability of valueOf.
- // b) Deoptimization at ToNumber doesn't have corresponding bailout id.
- Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
- if (r.NeitherInputCanBe(maybe_string)) {
- ...
- }
-#endif
-#if 0
// TODO(turbofan): Lowering of StringAdd is disabled for now because:
// a) The inserted ToString operation screws up valueOf vs. toString order.
// b) Deoptimization at ToString doesn't have corresponding bailout id.
@@ -269,79 +348,25 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
}
-Reduction JSTypedLowering::ReduceJSBitwiseOr(Node* node) {
- JSBinopReduction r(this, node);
-
- // We can only reduce to Word32Or if we are sure the to-number conversions
- // cannot lazily deoptimize.
- bool shortcut_or_zero =
- !FLAG_turbo_deoptimization && r.OneInputIs(zero_range_);
- if (r.BothInputsAre(Type::Primitive()) || shortcut_or_zero) {
- // TODO(titzer): some Smi bitwise operations don't really require going
- // all the way to int32, which can save tagging/untagging for some
- // operations on some platforms.
- // TODO(turbofan): make this heuristic configurable for code size.
- r.ConvertInputsToUI32(kSigned, kSigned);
- return r.ChangeToPureOperator(machine()->Word32Or(), Type::Integral32());
- }
- return NoChange();
-}
-
-
-Reduction JSTypedLowering::ReduceJSMultiply(Node* node) {
- JSBinopReduction r(this, node);
-
- // We can only reduce to NumberMultiply if we are sure the to-number
- // conversions cannot lazily deoptimize.
- bool shortcut_multiply_one =
- !FLAG_turbo_deoptimization && r.OneInputIs(one_range_);
-
- if (r.BothInputsAre(Type::Primitive()) || shortcut_multiply_one) {
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(simplified()->NumberMultiply(),
- Type::Number());
- }
- // TODO(turbofan): relax/remove the effects of this operator in other cases.
- return NoChange();
-}
-
-
Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
const Operator* numberOp) {
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive())) {
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(numberOp, Type::Number());
- }
-#if 0
- // TODO(turbofan): General ToNumber disabled for now because:
- // a) The inserted ToNumber operation screws up observability of valueOf.
- // b) Deoptimization at ToNumber doesn't have corresponding bailout id.
- if (r.OneInputIs(Type::Primitive())) {
- // If at least one input is a primitive, then insert appropriate conversions
- // to number and reduce this operator to the given numeric one.
- // TODO(turbofan): make this heuristic configurable for code size.
- r.ConvertInputsToNumber();
- return r.ChangeToPureOperator(numberOp);
- }
-#endif
- // TODO(turbofan): relax/remove the effects of this operator in other cases.
- return NoChange();
+ Node* frame_state = FLAG_turbo_deoptimization
+ ? NodeProperties::GetFrameStateInput(node, 1)
+ : nullptr;
+ r.ConvertInputsToNumber(frame_state);
+ return r.ChangeToPureOperator(numberOp, Type::Number());
}
Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
JSBinopReduction r(this, node);
- if (r.BothInputsAre(Type::Primitive())) {
- // TODO(titzer): some Smi bitwise operations don't really require going
- // all the way to int32, which can save tagging/untagging for some
- // operations
- // on some platforms.
- // TODO(turbofan): make this heuristic configurable for code size.
- r.ConvertInputsToUI32(kSigned, kSigned);
- return r.ChangeToPureOperator(intOp, Type::Integral32());
- }
- return NoChange();
+ Node* frame_state = FLAG_turbo_deoptimization
+ ? NodeProperties::GetFrameStateInput(node, 1)
+ : nullptr;
+ r.ConvertInputsToNumber(frame_state);
+ r.ConvertInputsToUI32(kSigned, kSigned);
+ return r.ChangeToPureOperator(intOp, Type::Integral32());
}
@@ -392,7 +417,7 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
...
}
#endif
- if (r.BothInputsAre(Type::Primitive()) &&
+ if (r.BothInputsAre(Type::PlainPrimitive()) &&
r.OneInputCannotBe(Type::StringOrReceiver())) {
const Operator* less_than;
const Operator* less_than_or_equal;
@@ -404,7 +429,7 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
less_than_or_equal = machine()->Int32LessThanOrEqual();
} else {
// TODO(turbofan): mixed signed/unsigned int32 comparisons.
- r.ConvertInputsToNumber();
+ r.ConvertPrimitiveInputsToNumber();
less_than = simplified()->NumberLessThan();
less_than_or_equal = simplified()->NumberLessThanOrEqual();
}
@@ -501,33 +526,64 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
Reduction JSTypedLowering::ReduceJSUnaryNot(Node* node) {
- Node* input = node->InputAt(0);
- Type* input_type = NodeProperties::GetBounds(input).upper;
+ Node* const input = node->InputAt(0);
+ Type* const input_type = NodeProperties::GetBounds(input).upper;
if (input_type->Is(Type::Boolean())) {
- // JSUnaryNot(x:boolean,context) => BooleanNot(x)
+ // JSUnaryNot(x:boolean) => BooleanNot(x)
node->set_op(simplified()->BooleanNot());
node->TrimInputCount(1);
return Changed(node);
+ } else if (input_type->Is(Type::OrderedNumber())) {
+    // JSUnaryNot(x:ordered-number) => NumberEqual(x,#0)
+ node->set_op(simplified()->NumberEqual());
+ node->ReplaceInput(1, jsgraph()->ZeroConstant());
+ node->TrimInputCount(2);
+ return Changed(node);
+ } else if (input_type->Is(Type::String())) {
+ // JSUnaryNot(x:string) => NumberEqual(x.length,#0)
+ FieldAccess const access = AccessBuilder::ForStringLength();
+ // It is safe for the load to be effect-free (i.e. not linked into effect
+ // chain) because we assume String::length to be immutable.
+ Node* length = graph()->NewNode(simplified()->LoadField(access), input,
+ graph()->start(), graph()->start());
+ node->set_op(simplified()->NumberEqual());
+ node->ReplaceInput(0, length);
+ node->ReplaceInput(1, jsgraph()->ZeroConstant());
+ node->TrimInputCount(2);
+ NodeProperties::ReplaceWithValue(node, node, length);
+ return Changed(node);
}
- // JSUnaryNot(x,context) => BooleanNot(AnyToBoolean(x))
- node->set_op(simplified()->BooleanNot());
- node->ReplaceInput(0, graph()->NewNode(simplified()->AnyToBoolean(), input));
- node->TrimInputCount(1);
- return Changed(node);
+ return NoChange();
}
Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
- Node* input = node->InputAt(0);
- Type* input_type = NodeProperties::GetBounds(input).upper;
+ Node* const input = node->InputAt(0);
+ Type* const input_type = NodeProperties::GetBounds(input).upper;
if (input_type->Is(Type::Boolean())) {
- // JSToBoolean(x:boolean,context) => x
+ // JSToBoolean(x:boolean) => x
return Replace(input);
+ } else if (input_type->Is(Type::OrderedNumber())) {
+ // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
+ node->set_op(simplified()->BooleanNot());
+ node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
+ jsgraph()->ZeroConstant()));
+ node->TrimInputCount(1);
+ return Changed(node);
+ } else if (input_type->Is(Type::String())) {
+ // JSToBoolean(x:string) => NumberLessThan(#0,x.length)
+ FieldAccess const access = AccessBuilder::ForStringLength();
+ // It is safe for the load to be effect-free (i.e. not linked into effect
+ // chain) because we assume String::length to be immutable.
+ Node* length = graph()->NewNode(simplified()->LoadField(access), input,
+ graph()->start(), graph()->start());
+ node->set_op(simplified()->NumberLessThan());
+ node->ReplaceInput(0, jsgraph()->ZeroConstant());
+ node->ReplaceInput(1, length);
+ node->TrimInputCount(2);
+ return Changed(node);
}
- // JSToBoolean(x,context) => AnyToBoolean(x)
- node->set_op(simplified()->AnyToBoolean());
- node->TrimInputCount(1);
- return Changed(node);
+ return NoChange();
}
@@ -584,7 +640,7 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
DCHECK(NodeProperties::IsControl(control));
DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Number()));
DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Number()));
- RelaxEffects(node);
+ RelaxEffectsAndControls(node);
node->set_op(common()->Phi(kMachAnyTagged, input_count));
for (int i = 0; i < input_count; ++i) {
// We must be very careful not to introduce cycles when pushing
@@ -593,7 +649,7 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
// to simply reuse the context of the {node}. However, ToNumber()
// does not require a context anyways, so it's safe to discard it
// here and pass the dummy context.
- Node* const value = ConvertToNumber(input->InputAt(i));
+ Node* const value = ConvertPrimitiveToNumber(input->InputAt(i));
if (i < node->InputCount()) {
node->ReplaceInput(i, value);
} else {
@@ -616,7 +672,7 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
DCHECK_EQ(3, input_count);
DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Number()));
DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Number()));
- RelaxEffects(node);
+ RelaxEffectsAndControls(node);
node->set_op(common()->Select(kMachAnyTagged, input_hint));
node->ReplaceInput(0, input->InputAt(0));
for (int i = 1; i < input_count; ++i) {
@@ -626,7 +682,7 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
// to simply reuse the context of the {node}. However, ToNumber()
// does not require a context anyways, so it's safe to discard it
// here and pass the dummy context.
- Node* const value = ConvertToNumber(input->InputAt(i));
+ Node* const value = ConvertPrimitiveToNumber(input->InputAt(i));
node->ReplaceInput(i, value);
}
node->TrimInputCount(input_count);
@@ -640,12 +696,13 @@ Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
NodeProperties::GetControlInput(node) != graph()->start()) {
// JSToNumber(x:plain-primitive,context,effect,control)
// => JSToNumber(x,no-context,start,start)
- RelaxEffects(node);
+ RelaxEffectsAndControls(node);
NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
NodeProperties::ReplaceControlInput(node, graph()->start());
NodeProperties::ReplaceEffectInput(node, graph()->start());
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- NodeProperties::ReplaceFrameStateInput(node,
+ if (FLAG_turbo_deoptimization) {
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+ NodeProperties::ReplaceFrameStateInput(node, 0,
jsgraph()->EmptyFrameState());
}
return Changed(node);
@@ -767,11 +824,15 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
if (number_reduction.Changed()) {
value = number_reduction.replacement();
} else {
- if (OperatorProperties::HasFrameStateInput(
- javascript()->ToNumber())) {
+ DCHECK(FLAG_turbo_deoptimization ==
+ (OperatorProperties::GetFrameStateInputCount(
+ javascript()->ToNumber()) == 1));
+ if (FLAG_turbo_deoptimization) {
+ Node* frame_state_for_to_number =
+ NodeProperties::GetFrameStateInput(node, 1);
value = effect =
graph()->NewNode(javascript()->ToNumber(), value, context,
- jsgraph()->EmptyFrameState(), effect, control);
+ frame_state_for_to_number, effect, control);
} else {
value = effect = graph()->NewNode(javascript()->ToNumber(), value,
context, effect, control);
@@ -796,6 +857,7 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
node->ReplaceInput(3, effect);
node->ReplaceInput(4, control);
node->TrimInputCount(5);
+ RelaxControls(node);
return Changed(node);
}
// Compute byte offset.
@@ -809,6 +871,7 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
node->ReplaceInput(4, effect);
node->ReplaceInput(5, control);
node->TrimInputCount(6);
+ RelaxControls(node);
return Changed(node);
}
}
@@ -904,7 +967,7 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSGreaterThanOrEqual:
return ReduceJSComparison(node);
case IrOpcode::kJSBitwiseOr:
- return ReduceJSBitwiseOr(node);
+ return ReduceInt32Binop(node, machine()->Word32Or());
case IrOpcode::kJSBitwiseXor:
return ReduceInt32Binop(node, machine()->Word32Xor());
case IrOpcode::kJSBitwiseAnd:
@@ -920,7 +983,7 @@ Reduction JSTypedLowering::Reduce(Node* node) {
case IrOpcode::kJSSubtract:
return ReduceNumberBinop(node, simplified()->NumberSubtract());
case IrOpcode::kJSMultiply:
- return ReduceJSMultiply(node);
+ return ReduceNumberBinop(node, simplified()->NumberMultiply());
case IrOpcode::kJSDivide:
return ReduceNumberBinop(node, simplified()->NumberDivide());
case IrOpcode::kJSModulus:
@@ -948,7 +1011,7 @@ Reduction JSTypedLowering::Reduce(Node* node) {
}
-Node* JSTypedLowering::ConvertToNumber(Node* input) {
+Node* JSTypedLowering::ConvertPrimitiveToNumber(Node* input) {
DCHECK(NodeProperties::GetBounds(input).upper->Is(Type::PlainPrimitive()));
// Avoid inserting too many eager ToNumber() operations.
Reduction const reduction = ReduceJSToNumberInput(input);
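
The frame-state juggling in ConvertInputsToNumber above is the subtle part of this file: a plain-primitive input can be converted without any bailout point, while a general input needs a ToNumber node whose lazy-deopt frame state pokes the result into the operand's slot, PokeAt(1) for the left input and PokeAt(0) for the right one. A condensed sketch of that policy; TypeOf, EmitToNumber and the parameter shape are stand-ins, not the real TurboFan API:

// Illustrative only: distills the branching in ConvertInputsToNumber.
Node* ConvertInput(Node* input, Node* frame_state, Node* converted_left) {
  if (TypeOf(input)->Is(Type::PlainPrimitive())) {
    // Cannot call back into JS, so no lazy bailout point is needed.
    return ConvertPrimitiveToNumber(input);
  }
  // General ToNumber may run valueOf()/toString() and deoptimize lazily;
  // its frame state must store the result where full code expects the
  // operand, and the right-hand state must also see the converted left.
  Node* fs = (converted_left == nullptr)
                 ? CreateFrameStateForLeftInput(frame_state)
                 : CreateFrameStateForRightInput(frame_state, converted_left);
  return EmitToNumber(input, fs);
}
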
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 838085e40c..d7e5b429ed 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -52,7 +52,7 @@ class JSTypedLowering FINAL : public Reducer {
Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
const Operator* shift_op);
- Node* ConvertToNumber(Node* input);
+ Node* ConvertPrimitiveToNumber(Node* input);
template <IrOpcode::Value>
Node* FindConversion(Node* input);
void InsertConversion(Node* conversion);
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
index 4242c957ed..e5b4595960 100644
--- a/deps/v8/src/compiler/jump-threading.cc
+++ b/deps/v8/src/compiler/jump-threading.cc
@@ -9,10 +9,10 @@ namespace v8 {
namespace internal {
namespace compiler {
-typedef BasicBlock::RpoNumber RpoNumber;
-
-#define TRACE(x) \
- if (FLAG_trace_turbo_jt) PrintF x
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_jt) PrintF(__VA_ARGS__); \
+ } while (false)
struct JumpThreadingState {
bool forwarded;
@@ -31,19 +31,19 @@ struct JumpThreadingState {
RpoNumber to_to = result[to.ToInt()];
bool pop = true;
if (to == from) {
- TRACE((" xx %d\n", from.ToInt()));
+ TRACE(" xx %d\n", from.ToInt());
result[from.ToInt()] = from;
} else if (to_to == unvisited()) {
- TRACE((" fw %d -> %d (recurse)\n", from.ToInt(), to.ToInt()));
+ TRACE(" fw %d -> %d (recurse)\n", from.ToInt(), to.ToInt());
stack.push(to);
result[to.ToInt()] = onstack();
pop = false; // recurse.
} else if (to_to == onstack()) {
- TRACE((" fw %d -> %d (cycle)\n", from.ToInt(), to.ToInt()));
+ TRACE(" fw %d -> %d (cycle)\n", from.ToInt(), to.ToInt());
result[from.ToInt()] = to; // break the cycle.
forwarded = true;
} else {
- TRACE((" fw %d -> %d (forward)\n", from.ToInt(), to.ToInt()));
+ TRACE(" fw %d -> %d (forward)\n", from.ToInt(), to.ToInt());
result[from.ToInt()] = to_to; // forward the block.
forwarded = true;
}
@@ -70,36 +70,36 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
while (!state.stack.empty()) {
InstructionBlock* block = code->InstructionBlockAt(state.stack.top());
// Process the instructions in a block up to a non-empty instruction.
- TRACE(("jt [%d] B%d RPO%d\n", static_cast<int>(stack.size()),
- block->id().ToInt(), block->rpo_number().ToInt()));
+ TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
+ block->rpo_number().ToInt());
bool fallthru = true;
RpoNumber fw = block->rpo_number();
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
if (instr->IsGapMoves() && GapInstruction::cast(instr)->IsRedundant()) {
// skip redundant gap moves.
- TRACE((" nop gap\n"));
+ TRACE(" nop gap\n");
continue;
} else if (instr->IsSourcePosition()) {
// skip source positions.
- TRACE((" src pos\n"));
+ TRACE(" src pos\n");
continue;
} else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
// can't skip instructions with flags continuations.
- TRACE((" flags\n"));
+ TRACE(" flags\n");
fallthru = false;
} else if (instr->IsNop()) {
// skip nops.
- TRACE((" nop\n"));
+ TRACE(" nop\n");
continue;
} else if (instr->arch_opcode() == kArchJmp) {
// try to forward the jump instruction.
- TRACE((" jmp\n"));
+ TRACE(" jmp\n");
fw = code->InputRpo(instr, 0);
fallthru = false;
} else {
// can't skip other instructions.
- TRACE((" other\n"));
+ TRACE(" other\n");
fallthru = false;
}
break;
@@ -120,14 +120,12 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
if (FLAG_trace_turbo_jt) {
for (int i = 0; i < static_cast<int>(result.size()); i++) {
- TRACE(("RPO%d B%d ", i,
- code->InstructionBlockAt(RpoNumber::FromInt(i))->id().ToInt()));
+ TRACE("B%d ", i);
int to = result[i].ToInt();
if (i != to) {
- TRACE(("-> B%d\n",
- code->InstructionBlockAt(RpoNumber::FromInt(to))->id().ToInt()));
+ TRACE("-> B%d\n", to);
} else {
- TRACE(("\n"));
+ TRACE("\n");
}
}
}
@@ -157,7 +155,7 @@ void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
} else if (instr->arch_opcode() == kArchJmp) {
if (skip[block_num]) {
// Overwrite a redundant jump with a nop.
- TRACE(("jt-fw nop @%d\n", i));
+ TRACE("jt-fw nop @%d\n", i);
instr->OverwriteWithNop();
}
fallthru = false; // jumps don't fall through to the next block.
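
The rewritten TRACE macro replaces the old double-parenthesis form with __VA_ARGS__ wrapped in do { ... } while (false), so every TRACE(...) behaves as a single statement. A minimal standalone illustration of why the wrapper matters:

#include <cstdio>

static bool FLAG_trace = true;

#define TRACE(...)                       \
  do {                                   \
    if (FLAG_trace) printf(__VA_ARGS__); \
  } while (false)

int main(int argc, char**) {
  // Without the do/while wrapper, the else below would bind to the if
  // hidden inside the macro instead of the outer if.
  if (argc > 1)
    TRACE("got %d extra args\n", argc - 1);
  else
    TRACE("no extra args\n");
  return 0;
}
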
diff --git a/deps/v8/src/compiler/jump-threading.h b/deps/v8/src/compiler/jump-threading.h
index b801fecc0a..fa74ee9a52 100644
--- a/deps/v8/src/compiler/jump-threading.h
+++ b/deps/v8/src/compiler/jump-threading.h
@@ -17,13 +17,12 @@ class JumpThreading {
public:
// Compute the forwarding map of basic blocks to their ultimate destination.
// Returns {true} if there is at least one block that is forwarded.
- static bool ComputeForwarding(Zone* local_zone,
- ZoneVector<BasicBlock::RpoNumber>& result,
+ static bool ComputeForwarding(Zone* local_zone, ZoneVector<RpoNumber>& result,
InstructionSequence* code);
// Rewrite the instructions to forward jumps and branches.
// May also negate some branches.
- static void ApplyForwarding(ZoneVector<BasicBlock::RpoNumber>& forwarding,
+ static void ApplyForwarding(ZoneVector<RpoNumber>& forwarding,
InstructionSequence* code);
};
diff --git a/deps/v8/src/compiler/linkage-impl.h b/deps/v8/src/compiler/linkage-impl.h
index abd0696837..98f8f1c5b7 100644
--- a/deps/v8/src/compiler/linkage-impl.h
+++ b/deps/v8/src/compiler/linkage-impl.h
@@ -57,12 +57,10 @@ class LinkageHelper {
// The target for JS function calls is the JSFunction object.
MachineType target_type = kMachAnyTagged;
- // Unoptimized code doesn't preserve the JSCallFunctionReg, so expect the
- // closure on the stack.
- LinkageLocation target_loc =
- is_osr ? stackloc(Linkage::kJSFunctionCallClosureParamIndex -
- js_parameter_count)
- : regloc(LinkageTraits::JSCallFunctionReg());
+ // TODO(titzer): When entering into an OSR function from unoptimized code,
+ // the JSFunction is not in a register, but it is on the stack in an
+ // unaddressable spill slot. We hack this in the OSR prologue. Fix.
+ LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
return new (zone) CallDescriptor( // --
CallDescriptor::kCallJSFunction, // kind
target_type, // target MachineType
@@ -138,11 +136,13 @@ class LinkageHelper {
}
+ // TODO(all): Add support for return representations/locations to
+ // CallInterfaceDescriptor.
// TODO(turbofan): cache call descriptors for code stub calls.
static CallDescriptor* GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
const int register_parameter_count =
descriptor.GetEnvironmentParameterCount();
const int js_parameter_count =
@@ -157,20 +157,23 @@ class LinkageHelper {
// Add return location.
AddReturnLocations(&locations);
- types.AddReturn(kMachAnyTagged);
+ types.AddReturn(return_type);
// Add parameters in registers and on the stack.
for (int i = 0; i < js_parameter_count; i++) {
if (i < register_parameter_count) {
// The first parameters go in registers.
Register reg = descriptor.GetEnvironmentParameterRegister(i);
+ Representation rep =
+ descriptor.GetEnvironmentParameterRepresentation(i);
locations.AddParam(regloc(reg));
+ types.AddParam(reptyp(rep));
} else {
// The rest of the parameters go on the stack.
int stack_slot = i - register_parameter_count - stack_parameter_count;
locations.AddParam(stackloc(stack_slot));
+ types.AddParam(kMachAnyTagged);
}
- types.AddParam(kMachAnyTagged);
}
// Add context.
locations.AddParam(regloc(LinkageTraits::ContextReg()));
@@ -232,6 +235,34 @@ class LinkageHelper {
DCHECK_LT(i, 0);
return LinkageLocation(i);
}
+
+ static MachineType reptyp(Representation representation) {
+ switch (representation.kind()) {
+ case Representation::kInteger8:
+ return kMachInt8;
+ case Representation::kUInteger8:
+ return kMachUint8;
+ case Representation::kInteger16:
+ return kMachInt16;
+ case Representation::kUInteger16:
+ return kMachUint16;
+ case Representation::kInteger32:
+ return kMachInt32;
+ case Representation::kSmi:
+ case Representation::kTagged:
+ case Representation::kHeapObject:
+ return kMachAnyTagged;
+ case Representation::kDouble:
+ return kMachFloat64;
+ case Representation::kExternal:
+ return kMachPtr;
+ case Representation::kNone:
+ case Representation::kNumRepresentations:
+ break;
+ }
+ UNREACHABLE();
+ return kMachNone;
+ }
};
@@ -254,7 +285,6 @@ LinkageLocation Linkage::GetOsrValueLocation(int index) const {
}
}
-
} // namespace compiler
} // namespace internal
} // namespace v8
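
With reptyp(), register parameters of a stub call are now typed from the interface descriptor's representations while stack parameters stay kMachAnyTagged. A standalone sketch of the mapping using stand-in enums (the real code maps V8's Representation::Kind to MachineType as listed above):

enum class Rep { kInteger32, kSmi, kTagged, kDouble, kExternal };
enum class MachType { kInt32, kAnyTagged, kFloat64, kPtr };

MachType RepToMachType(Rep rep) {
  switch (rep) {
    case Rep::kInteger32: return MachType::kInt32;
    case Rep::kSmi:
    case Rep::kTagged:    return MachType::kAnyTagged;  // tagged machine words
    case Rep::kDouble:    return MachType::kFloat64;    // unboxed IEEE double
    case Rep::kExternal:  return MachType::kPtr;        // raw pointer
  }
  return MachType::kAnyTagged;  // unreachable, mirrors UNREACHABLE()
}
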
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index eedf9ed746..72b7dafe98 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -39,6 +39,14 @@ std::ostream& operator<<(std::ostream& os, const CallDescriptor& d) {
CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
+ if (info->code_stub() != NULL) {
+ // Use the code stub interface descriptor.
+ CallInterfaceDescriptor descriptor =
+ info->code_stub()->GetCallInterfaceDescriptor();
+ return GetStubCallDescriptor(info->isolate(), zone, descriptor, 0,
+ CallDescriptor::kNoFlags,
+ Operator::kNoProperties);
+ }
if (info->function() != NULL) {
// If we already have the function literal, use the number of parameters
// plus the receiver.
@@ -54,14 +62,6 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
1 + shared->internal_formal_parameter_count(),
CallDescriptor::kNoFlags);
}
- if (info->code_stub() != NULL) {
- // Use the code stub interface descriptor.
- CallInterfaceDescriptor descriptor =
- info->code_stub()->GetCallInterfaceDescriptor();
- return GetStubCallDescriptor(info->isolate(), zone, descriptor, 0,
- CallDescriptor::kNoFlags,
- Operator::kNoProperties);
- }
return NULL; // TODO(titzer): ?
}
@@ -114,22 +114,23 @@ bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
case Runtime::kNewArguments:
case Runtime::kNewClosure:
case Runtime::kNewFunctionContext:
+ case Runtime::kNewRestParamSlow:
case Runtime::kPushBlockContext:
case Runtime::kPushCatchContext:
case Runtime::kReThrow:
case Runtime::kSetProperty: // TODO(jarin): Is it safe?
- case Runtime::kStringCompare:
+ case Runtime::kStringCompareRT:
case Runtime::kStringEquals:
case Runtime::kToFastProperties: // TODO(jarin): Is it safe?
case Runtime::kTraceEnter:
case Runtime::kTraceExit:
case Runtime::kTypeof:
- case Runtime::kNewRestParamSlow:
return false;
case Runtime::kInlineArguments:
case Runtime::kInlineCallFunction:
case Runtime::kInlineDateField:
- case Runtime::kInlineOptimizedGetPrototype:
+ case Runtime::kInlineDeoptimizeNow:
+ case Runtime::kInlineGetPrototype:
case Runtime::kInlineRegExpExec:
return true;
default:
@@ -174,7 +175,7 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
UNIMPLEMENTED();
return NULL;
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index a3bdbf9d5b..c1f3762655 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -56,11 +56,11 @@ class CallDescriptor FINAL : public ZoneObject {
};
enum Flag {
- // TODO(jarin) kLazyDeoptimization and kNeedsFrameState should be unified.
kNoFlags = 0u,
kNeedsFrameState = 1u << 0,
kPatchableCallSite = 1u << 1,
kNeedsNopAfterCall = 1u << 2,
+ kHasExceptionHandler = 1u << 3,
kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
};
typedef base::Flags<Flag> Flags;
@@ -191,7 +191,8 @@ class Linkage : public ZoneObject {
static CallDescriptor* GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties = Operator::kNoProperties);
+ Operator::Properties properties = Operator::kNoProperties,
+ MachineType return_type = kMachAnyTagged);
// Creates a call descriptor for simplified C calls that is appropriate
// for the host platform. This simplified calling convention only supports
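
Because the new return_type parameter defaults to kMachAnyTagged, every existing caller keeps its behavior; only stubs returning untagged values pass the extra argument. Two hypothetical call sites, with {isolate}, {zone} and {descriptor} assumed from the surrounding context:

// Tagged return value: relies on the default, identical to old callers.
CallDescriptor* tagged_call = Linkage::GetStubCallDescriptor(
    isolate, zone, descriptor, 0, CallDescriptor::kNoFlags);

// Stub returning an untagged double: spells out the new argument.
CallDescriptor* double_call = Linkage::GetStubCallDescriptor(
    isolate, zone, descriptor, 0, CallDescriptor::kNoFlags,
    Operator::kNoProperties, kMachFloat64);
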
diff --git a/deps/v8/src/compiler/liveness-analyzer.cc b/deps/v8/src/compiler/liveness-analyzer.cc
new file mode 100644
index 0000000000..301106d1c3
--- /dev/null
+++ b/deps/v8/src/compiler/liveness-analyzer.cc
@@ -0,0 +1,200 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/liveness-analyzer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/state-values-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+LivenessAnalyzer::LivenessAnalyzer(size_t local_count, Zone* zone)
+ : zone_(zone), blocks_(zone), local_count_(local_count), queue_(zone) {}
+
+
+void LivenessAnalyzer::Print(std::ostream& os) {
+ for (auto block : blocks_) {
+ block->Print(os);
+ os << std::endl;
+ }
+}
+
+
+LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock() {
+ LivenessAnalyzerBlock* result =
+ new (zone()->New(sizeof(LivenessAnalyzerBlock)))
+ LivenessAnalyzerBlock(blocks_.size(), local_count_, zone());
+ blocks_.push_back(result);
+ return result;
+}
+
+
+LivenessAnalyzerBlock* LivenessAnalyzer::NewBlock(
+ LivenessAnalyzerBlock* predecessor) {
+ LivenessAnalyzerBlock* result = NewBlock();
+ result->AddPredecessor(predecessor);
+ return result;
+}
+
+
+void LivenessAnalyzer::Queue(LivenessAnalyzerBlock* block) {
+ if (!block->IsQueued()) {
+ block->SetQueued();
+ queue_.push(block);
+ }
+}
+
+
+void LivenessAnalyzer::Run(NonLiveFrameStateSlotReplacer* replacer) {
+ if (local_count_ == 0) {
+ // No local variables => nothing to do.
+ return;
+ }
+
+ // Put all blocks into the queue.
+ DCHECK(queue_.empty());
+ for (auto block : blocks_) {
+ Queue(block);
+ }
+
+ // Compute the fix-point.
+ BitVector working_area(static_cast<int>(local_count_), zone_);
+ while (!queue_.empty()) {
+ LivenessAnalyzerBlock* block = queue_.front();
+ queue_.pop();
+ block->Process(&working_area, nullptr);
+
+ for (auto i = block->pred_begin(); i != block->pred_end(); i++) {
+ if ((*i)->UpdateLive(&working_area)) {
+ Queue(*i);
+ }
+ }
+ }
+
+ // Update the frame states according to the liveness.
+ for (auto block : blocks_) {
+ block->Process(&working_area, replacer);
+ }
+}
+
+LivenessAnalyzerBlock::LivenessAnalyzerBlock(size_t id, size_t local_count,
+ Zone* zone)
+ : entries_(zone),
+ predecessors_(zone),
+ live_(local_count == 0 ? 1 : static_cast<int>(local_count), zone),
+ queued_(false),
+ id_(id) {}
+
+void LivenessAnalyzerBlock::Process(BitVector* result,
+ NonLiveFrameStateSlotReplacer* replacer) {
+ queued_ = false;
+
+ // Copy the bitvector to the target bit vector.
+ result->CopyFrom(live_);
+
+ for (auto i = entries_.rbegin(); i != entries_.rend(); i++) {
+ auto entry = *i;
+ switch (entry.kind()) {
+ case Entry::kLookup:
+ result->Add(entry.var());
+ break;
+ case Entry::kBind:
+ result->Remove(entry.var());
+ break;
+ case Entry::kCheckpoint:
+ if (replacer != nullptr) {
+ replacer->ClearNonLiveFrameStateSlots(entry.node(), result);
+ }
+ break;
+ }
+ }
+}
+
+
+bool LivenessAnalyzerBlock::UpdateLive(BitVector* working_area) {
+ return live_.UnionIsChanged(*working_area);
+}
+
+
+void NonLiveFrameStateSlotReplacer::ClearNonLiveFrameStateSlots(
+ Node* frame_state, BitVector* liveness) {
+ DCHECK_EQ(frame_state->opcode(), IrOpcode::kFrameState);
+ Node* locals_state = frame_state->InputAt(1);
+ DCHECK_EQ(locals_state->opcode(), IrOpcode::kStateValues);
+ int count = static_cast<int>(StateValuesAccess(locals_state).size());
+ DCHECK_EQ(count == 0 ? 1 : count, liveness->length());
+ for (int i = 0; i < count; i++) {
+ bool live = liveness->Contains(i) || permanently_live_.Contains(i);
+ if (!live || locals_state->InputAt(i) != replacement_node_) {
+ Node* new_values = ClearNonLiveStateValues(locals_state, liveness);
+ frame_state->ReplaceInput(1, new_values);
+ break;
+ }
+ }
+}
+
+
+Node* NonLiveFrameStateSlotReplacer::ClearNonLiveStateValues(
+ Node* values, BitVector* liveness) {
+ DCHECK(inputs_buffer_.empty());
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(values)) {
+    // Index of the next variable is its future index in the inputs buffer,
+ // i.e., the buffer's size.
+ int var = static_cast<int>(inputs_buffer_.size());
+ bool live = liveness->Contains(var) || permanently_live_.Contains(var);
+ inputs_buffer_.push_back(live ? node.node : replacement_node_);
+ }
+ Node* result = state_values_cache()->GetNodeForValues(
+ inputs_buffer_.empty() ? nullptr : &(inputs_buffer_.front()),
+ inputs_buffer_.size());
+ inputs_buffer_.clear();
+ return result;
+}
+
+
+void LivenessAnalyzerBlock::Print(std::ostream& os) {
+ os << "Block " << id();
+ bool first = true;
+ for (LivenessAnalyzerBlock* pred : predecessors_) {
+ if (!first) {
+ os << ", ";
+ } else {
+ os << "; predecessors: ";
+ first = false;
+ }
+ os << pred->id();
+ }
+ os << std::endl;
+
+ for (auto entry : entries_) {
+ os << " ";
+ switch (entry.kind()) {
+ case Entry::kLookup:
+ os << "- Lookup " << entry.var() << std::endl;
+ break;
+ case Entry::kBind:
+ os << "- Bind " << entry.var() << std::endl;
+ break;
+ case Entry::kCheckpoint:
+ os << "- Checkpoint " << entry.node()->id() << std::endl;
+ break;
+ }
+ }
+
+ if (live_.length() > 0) {
+ os << " Live set: ";
+ for (int i = 0; i < live_.length(); i++) {
+ os << (live_.Contains(i) ? "L" : ".");
+ }
+ os << std::endl;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
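
Run() above is a textbook backward-liveness fix-point: every block is seeded into a worklist, each pop recomputes the block's live-in set by walking its entries in reverse, and predecessors whose live-out sets grow are re-queued until nothing changes. The same shape in a self-contained sketch, with bit vectors replaced by std::set<int> and the kBind/kCheckpoint entries omitted for brevity:

#include <queue>
#include <set>
#include <vector>

struct Block {
  std::vector<int> lookups;          // variables read in this block
  std::vector<Block*> predecessors;  // control-flow predecessors
  std::set<int> live;                // live-out set; grows monotonically
  bool queued = false;
};

void RunLiveness(std::vector<Block*>& blocks) {
  std::queue<Block*> queue;
  for (Block* b : blocks) { b->queued = true; queue.push(b); }
  while (!queue.empty()) {
    Block* b = queue.front(); queue.pop(); b->queued = false;
    std::set<int> live_in = b->live;                 // start from live-out
    for (int var : b->lookups) live_in.insert(var);  // reads make vars live
    for (Block* pred : b->predecessors) {
      size_t before = pred->live.size();
      pred->live.insert(live_in.begin(), live_in.end());
      if (pred->live.size() != before && !pred->queued) {
        pred->queued = true;
        queue.push(pred);  // predecessor's live-out grew: revisit it
      }
    }
  }
}
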
diff --git a/deps/v8/src/compiler/liveness-analyzer.h b/deps/v8/src/compiler/liveness-analyzer.h
new file mode 100644
index 0000000000..1e2f85b45e
--- /dev/null
+++ b/deps/v8/src/compiler/liveness-analyzer.h
@@ -0,0 +1,146 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LIVENESS_ANALYZER_H_
+#define V8_COMPILER_LIVENESS_ANALYZER_H_
+
+#include "src/bit-vector.h"
+#include "src/compiler/node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LivenessAnalyzerBlock;
+class Node;
+class StateValuesCache;
+
+
+class NonLiveFrameStateSlotReplacer {
+ public:
+ void ClearNonLiveFrameStateSlots(Node* frame_state, BitVector* liveness);
+ NonLiveFrameStateSlotReplacer(StateValuesCache* state_values_cache,
+ Node* replacement, size_t local_count,
+ Zone* local_zone)
+ : replacement_node_(replacement),
+ state_values_cache_(state_values_cache),
+ local_zone_(local_zone),
+ permanently_live_(local_count == 0 ? 1 : static_cast<int>(local_count),
+ local_zone),
+ inputs_buffer_(local_zone) {}
+
+ void MarkPermanentlyLive(int var) { permanently_live_.Add(var); }
+
+ private:
+ Node* ClearNonLiveStateValues(Node* frame_state, BitVector* liveness);
+
+ StateValuesCache* state_values_cache() { return state_values_cache_; }
+ Zone* local_zone() { return local_zone_; }
+
+ // Node that replaces dead values.
+ Node* replacement_node_;
+ // Reference to state values cache so that we can create state values
+ // nodes.
+ StateValuesCache* state_values_cache_;
+
+ Zone* local_zone_;
+ BitVector permanently_live_;
+ NodeVector inputs_buffer_;
+};
+
+
+class LivenessAnalyzer {
+ public:
+ LivenessAnalyzer(size_t local_count, Zone* zone);
+
+ LivenessAnalyzerBlock* NewBlock();
+ LivenessAnalyzerBlock* NewBlock(LivenessAnalyzerBlock* predecessor);
+
+ void Run(NonLiveFrameStateSlotReplacer* relaxer);
+
+ Zone* zone() { return zone_; }
+
+ void Print(std::ostream& os);
+
+ size_t local_count() { return local_count_; }
+
+ private:
+ void Queue(LivenessAnalyzerBlock* block);
+
+ Zone* zone_;
+ ZoneDeque<LivenessAnalyzerBlock*> blocks_;
+ size_t local_count_;
+
+ ZoneQueue<LivenessAnalyzerBlock*> queue_;
+};
+
+
+class LivenessAnalyzerBlock {
+ public:
+ friend class LivenessAnalyzer;
+
+ void Lookup(int var) { entries_.push_back(Entry(Entry::kLookup, var)); }
+ void Bind(int var) { entries_.push_back(Entry(Entry::kBind, var)); }
+ void Checkpoint(Node* node) { entries_.push_back(Entry(node)); }
+ void AddPredecessor(LivenessAnalyzerBlock* b) { predecessors_.push_back(b); }
+
+ private:
+ class Entry {
+ public:
+ enum Kind { kBind, kLookup, kCheckpoint };
+
+ Kind kind() const { return kind_; }
+ Node* node() const {
+ DCHECK(kind() == kCheckpoint);
+ return node_;
+ }
+ int var() const {
+ DCHECK(kind() != kCheckpoint);
+ return var_;
+ }
+
+ explicit Entry(Node* node) : kind_(kCheckpoint), var_(-1), node_(node) {}
+ Entry(Kind kind, int var) : kind_(kind), var_(var), node_(nullptr) {
+ DCHECK(kind != kCheckpoint);
+ }
+
+ private:
+ Kind kind_;
+ int var_;
+ Node* node_;
+ };
+
+ LivenessAnalyzerBlock(size_t id, size_t local_count, Zone* zone);
+ void Process(BitVector* result, NonLiveFrameStateSlotReplacer* relaxer);
+ bool UpdateLive(BitVector* working_area);
+
+ void SetQueued() { queued_ = true; }
+ bool IsQueued() { return queued_; }
+
+ ZoneDeque<LivenessAnalyzerBlock*>::const_iterator pred_begin() {
+ return predecessors_.begin();
+ }
+ ZoneDeque<LivenessAnalyzerBlock*>::const_iterator pred_end() {
+ return predecessors_.end();
+ }
+
+ size_t id() { return id_; }
+ void Print(std::ostream& os);
+
+ ZoneDeque<Entry> entries_;
+ ZoneDeque<LivenessAnalyzerBlock*> predecessors_;
+
+ BitVector live_;
+ bool queued_;
+
+ size_t id_;
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_COMPILER_LIVENESS_ANALYZER_H_
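
Putting the interface together: a client records per-block variable reads (Lookup), writes (Bind) and frame-state checkpoints, then runs the analysis so that dead frame-state slots get replaced. A hedged usage sketch; {zone}, {cache}, {optimized_out} and {frame_state} are placeholders supplied by the embedding compiler:

LivenessAnalyzer analyzer(/* local_count */ 3, zone);

LivenessAnalyzerBlock* b0 = analyzer.NewBlock();
b0->Bind(0);                  // var 0 written: kills liveness above it
b0->Lookup(1);                // var 1 read: live on entry to b0
b0->Checkpoint(frame_state);  // frame state to prune afterwards

LivenessAnalyzerBlock* b1 = analyzer.NewBlock(b0);  // b0 is its predecessor
b1->Lookup(0);                // keeps var 0 live across b0's checkpoint

NonLiveFrameStateSlotReplacer replacer(&cache, optimized_out,
                                       /* local_count */ 3, zone);
analyzer.Run(&replacer);  // non-live slots in {frame_state} now point at
                          // {optimized_out}
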
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 8f91d49f81..ba0b7a1893 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -160,29 +160,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
return ReduceWord32Shifts(node);
}
- case IrOpcode::kWord32Sar: {
- Int32BinopMatcher m(node);
- if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
- if (m.IsFoldable()) { // K >> K => K
- return ReplaceInt32(m.left().Value() >> m.right().Value());
- }
- if (m.left().IsWord32Shl()) {
- Int32BinopMatcher mleft(m.left().node());
- if (mleft.left().IsLoad()) {
- LoadRepresentation const rep =
- OpParameter<LoadRepresentation>(mleft.left().node());
- if (m.right().Is(24) && mleft.right().Is(24) && rep == kMachInt8) {
- // Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
- return Replace(mleft.left().node());
- }
- if (m.right().Is(16) && mleft.right().Is(16) && rep == kMachInt16) {
- // Load[kMachInt16] << 16 >> 16 => Load[kMachInt8]
- return Replace(mleft.left().node());
- }
- }
- }
- return ReduceWord32Shifts(node);
- }
+ case IrOpcode::kWord32Sar:
+ return ReduceWord32Sar(node);
case IrOpcode::kWord32Ror: {
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.left().node()); // x ror 0 => x
@@ -454,6 +433,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
break;
}
+ case IrOpcode::kFloat64InsertLowWord32:
+ return ReduceFloat64InsertLowWord32(node);
+ case IrOpcode::kFloat64InsertHighWord32:
+ return ReduceFloat64InsertHighWord32(node);
case IrOpcode::kStore:
return ReduceStore(node);
default:
@@ -471,6 +454,25 @@ Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
return ReplaceUint32(bit_cast<uint32_t>(m.left().Value()) +
bit_cast<uint32_t>(m.right().Value()));
}
+ if (m.left().IsInt32Sub()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.left().Is(0)) { // (0 - x) + y => y - x
+ node->set_op(machine()->Int32Sub());
+ node->ReplaceInput(0, m.right().node());
+ node->ReplaceInput(1, mleft.right().node());
+ Reduction const reduction = ReduceInt32Sub(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
+ if (m.right().IsInt32Sub()) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.left().Is(0)) { // y + (0 - x) => y - x
+ node->set_op(machine()->Int32Sub());
+ node->ReplaceInput(1, mright.right().node());
+ Reduction const reduction = ReduceInt32Sub(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ }
return NoChange();
}
@@ -784,11 +786,48 @@ Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
}
+Reduction MachineOperatorReducer::ReduceWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) return Replace(m.left().node()); // x >> 0 => x
+ if (m.IsFoldable()) { // K >> K => K
+ return ReplaceInt32(m.left().Value() >> m.right().Value());
+ }
+ if (m.left().IsWord32Shl()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.left().IsComparison()) {
+ if (m.right().Is(31) && mleft.right().Is(31)) {
+ // Comparison << 31 >> 31 => 0 - Comparison
+ node->set_op(machine()->Int32Sub());
+ node->ReplaceInput(0, Int32Constant(0));
+ node->ReplaceInput(1, mleft.left().node());
+ Reduction const reduction = ReduceInt32Sub(node);
+ return reduction.Changed() ? reduction : Changed(node);
+ }
+ } else if (mleft.left().IsLoad()) {
+ LoadRepresentation const rep =
+ OpParameter<LoadRepresentation>(mleft.left().node());
+ if (m.right().Is(24) && mleft.right().Is(24) && rep == kMachInt8) {
+ // Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
+ return Replace(mleft.left().node());
+ }
+ if (m.right().Is(16) && mleft.right().Is(16) && rep == kMachInt16) {
+        // Load[kMachInt16] << 16 >> 16 => Load[kMachInt16]
+ return Replace(mleft.left().node());
+ }
+ }
+ }
+ return ReduceWord32Shifts(node);
+}
+
+
Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
Int32BinopMatcher m(node);
if (m.right().Is(0)) return Replace(m.right().node()); // x & 0 => 0
if (m.right().Is(-1)) return Replace(m.left().node()); // x & -1 => x
+ if (m.left().IsComparison() && m.right().Is(1)) { // CMP & 1 => CMP
+ return Replace(m.left().node());
+ }
if (m.IsFoldable()) { // K & K => K
return ReplaceInt32(m.left().Value() & m.right().Value());
}
@@ -871,6 +910,12 @@ Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
return reduction.Changed() ? reduction : Changed(node);
}
}
+ } else if (m.left().IsInt32Mul()) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().IsMultipleOf(-mask)) {
+ // (x * (K << L)) & (-1 << L) => x * (K << L)
+ return Replace(mleft.node());
+ }
}
}
return NoChange();
@@ -934,6 +979,32 @@ Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
}
+Reduction MachineOperatorReducer::ReduceFloat64InsertLowWord32(Node* node) {
+ DCHECK_EQ(IrOpcode::kFloat64InsertLowWord32, node->opcode());
+ Float64Matcher mlhs(node->InputAt(0));
+ Uint32Matcher mrhs(node->InputAt(1));
+ if (mlhs.HasValue() && mrhs.HasValue()) {
+ return ReplaceFloat64(bit_cast<double>(
+ (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF00000000)) |
+ mrhs.Value()));
+ }
+ return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceFloat64InsertHighWord32(Node* node) {
+ DCHECK_EQ(IrOpcode::kFloat64InsertHighWord32, node->opcode());
+ Float64Matcher mlhs(node->InputAt(0));
+ Uint32Matcher mrhs(node->InputAt(1));
+ if (mlhs.HasValue() && mrhs.HasValue()) {
+ return ReplaceFloat64(bit_cast<double>(
+ (bit_cast<uint64_t>(mlhs.Value()) & V8_UINT64_C(0xFFFFFFFF)) |
+ (static_cast<uint64_t>(mrhs.Value()) << 32)));
+ }
+ return NoChange();
+}
+
+
CommonOperatorBuilder* MachineOperatorReducer::common() const {
return jsgraph()->common();
}
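
The two new Float64Insert{Low,High}Word32 folders splice a 32-bit half into the raw IEEE-754 bit pattern of the other operand. The arithmetic can be verified in isolation; a standalone sketch mirroring the folding above:

#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t BitsOf(double x) {
  uint64_t b;
  std::memcpy(&b, &x, sizeof(b));
  return b;
}
static double FromBits(uint64_t b) {
  double x;
  std::memcpy(&x, &b, sizeof(x));
  return x;
}

int main() {
  double lhs = 1.0;  // bit pattern 0x3FF0000000000000
  uint32_t word = 0xDEADBEEF;
  // InsertLowWord32: keep the high half of lhs, replace the low half.
  double low = FromBits((BitsOf(lhs) & 0xFFFFFFFF00000000ull) | word);
  // InsertHighWord32: keep the low half, replace the high half.
  double high = FromBits((BitsOf(lhs) & 0xFFFFFFFFull) |
                         (static_cast<uint64_t>(word) << 32));
  printf("%016llx %016llx\n", (unsigned long long)BitsOf(low),
         (unsigned long long)BitsOf(high));
  return 0;
}
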
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index 9e02ffde72..7c41f143e2 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -75,8 +75,11 @@ class MachineOperatorReducer FINAL : public Reducer {
Reduction ReduceProjection(size_t index, Node* node);
Reduction ReduceWord32Shifts(Node* node);
Reduction ReduceWord32Shl(Node* node);
+ Reduction ReduceWord32Sar(Node* node);
Reduction ReduceWord32And(Node* node);
Reduction ReduceWord32Or(Node* node);
+ Reduction ReduceFloat64InsertLowWord32(Node* node);
+ Reduction ReduceFloat64InsertHighWord32(Node* node);
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 2522a8e15d..fa8979c2fa 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -73,6 +73,7 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Word32Sar, Operator::kNoProperties, 2, 0, 1) \
V(Word32Ror, Operator::kNoProperties, 2, 0, 1) \
V(Word32Equal, Operator::kCommutative, 2, 0, 1) \
+ V(Word32Clz, Operator::kNoProperties, 1, 0, 1) \
V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
@@ -123,13 +124,18 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
V(Float64Div, Operator::kNoProperties, 2, 0, 1) \
V(Float64Mod, Operator::kNoProperties, 2, 0, 1) \
V(Float64Sqrt, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Ceil, Operator::kNoProperties, 1, 0, 1) \
- V(Float64Floor, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64RoundDown, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTruncate, Operator::kNoProperties, 1, 0, 1) \
V(Float64RoundTiesAway, Operator::kNoProperties, 1, 0, 1) \
V(Float64Equal, Operator::kCommutative, 2, 0, 1) \
V(Float64LessThan, Operator::kNoProperties, 2, 0, 1) \
V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64ExtractLowWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
+ V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Max, Operator::kNoProperties, 2, 0, 1) \
+ V(Float64Min, Operator::kNoProperties, 2, 0, 1) \
V(LoadStackPointer, Operator::kNoProperties, 0, 0, 1)
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 42f313019f..d428562517 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -74,13 +74,14 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
// for operations that are unsupported by some back-ends.
enum Flag {
kNoFlags = 0u,
- kFloat64Floor = 1u << 0,
- kFloat64Ceil = 1u << 1,
- kFloat64RoundTruncate = 1u << 2,
- kFloat64RoundTiesAway = 1u << 3,
- kInt32DivIsSafe = 1u << 4,
- kUint32DivIsSafe = 1u << 5,
- kWord32ShiftIsSafe = 1u << 6
+ kFloat64Max = 1u << 0,
+ kFloat64Min = 1u << 1,
+ kFloat64RoundDown = 1u << 2,
+ kFloat64RoundTruncate = 1u << 3,
+ kFloat64RoundTiesAway = 1u << 4,
+ kInt32DivIsSafe = 1u << 5,
+ kUint32DivIsSafe = 1u << 6,
+ kWord32ShiftIsSafe = 1u << 7
};
typedef base::Flags<Flag, unsigned> Flags;
@@ -95,6 +96,7 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
const Operator* Word32Sar();
const Operator* Word32Ror();
const Operator* Word32Equal();
+ const Operator* Word32Clz();
bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
const Operator* Word64And();
@@ -167,16 +169,26 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
const Operator* Float64LessThan();
const Operator* Float64LessThanOrEqual();
+ // Floating point min/max complying to IEEE 754.
+ const Operator* Float64Max();
+ const Operator* Float64Min();
+ bool HasFloat64Max() { return flags_ & kFloat64Max; }
+ bool HasFloat64Min() { return flags_ & kFloat64Min; }
+
// Floating point rounding.
- const Operator* Float64Floor();
- const Operator* Float64Ceil();
+ const Operator* Float64RoundDown();
const Operator* Float64RoundTruncate();
const Operator* Float64RoundTiesAway();
- bool HasFloat64Floor() { return flags_ & kFloat64Floor; }
- bool HasFloat64Ceil() { return flags_ & kFloat64Ceil; }
+ bool HasFloat64RoundDown() { return flags_ & kFloat64RoundDown; }
bool HasFloat64RoundTruncate() { return flags_ & kFloat64RoundTruncate; }
bool HasFloat64RoundTiesAway() { return flags_ & kFloat64RoundTiesAway; }
+ // Floating point bit representation.
+ const Operator* Float64ExtractLowWord32();
+ const Operator* Float64ExtractHighWord32();
+ const Operator* Float64InsertLowWord32();
+ const Operator* Float64InsertHighWord32();
+
// load [base + index]
const Operator* Load(LoadRepresentation rep);
@@ -226,10 +238,10 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
#undef PSEUDO_OP_LIST
private:
- Zone* zone_;
- const MachineOperatorGlobalCache& cache_;
- const MachineType word_;
- const Flags flags_;
+ Zone* const zone_;
+ MachineOperatorGlobalCache const& cache_;
+ MachineType const word_;
+ Flags const flags_;
DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
};
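
Back-ends advertise optional machine operations through this Flag bit set, and lowering code checks the Has* predicates before emitting the corresponding operator. A hypothetical capability check; the fallback path is a placeholder:

// Sketch only: selects the rounding operator when the target supports it.
const Operator* SelectFloorOperator(MachineOperatorBuilder* machine) {
  if (machine->HasFloat64RoundDown()) {
    return machine->Float64RoundDown();  // single-instruction rounding
  }
  return nullptr;  // caller must expand the rounding in software instead
}
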
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 58c4581663..48ef0337f2 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -39,11 +39,11 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- FloatRegister OutputSingleRegister(int index = 0) {
+ FloatRegister OutputSingleRegister(size_t index = 0) {
return ToSingleRegister(instr_->OutputAt(index));
}
- FloatRegister InputSingleRegister(int index) {
+ FloatRegister InputSingleRegister(size_t index) {
return ToSingleRegister(instr_->InputAt(index));
}
@@ -53,7 +53,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
return ToDoubleRegister(op);
}
- Operand InputImmediate(int index) {
+ Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@@ -78,7 +78,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
return Operand(zero_reg);
}
- Operand InputOperand(int index) {
+ Operand InputOperand(size_t index) {
InstructionOperand* op = instr_->InputAt(index);
if (op->IsRegister()) {
return Operand(ToRegister(op));
@@ -86,8 +86,8 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
return InputImmediate(index);
}
- MemOperand MemoryOperand(int* first_index) {
- const int index = *first_index;
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
@@ -102,7 +102,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(no_reg);
}
- MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK(op != NULL);
@@ -116,7 +116,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
};
-static inline bool HasRegisterInput(Instruction* instr, int index) {
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
@@ -408,7 +408,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchCallJSFunction: {
@@ -422,7 +422,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchJmp:
@@ -437,6 +437,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchNop:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
@@ -490,6 +496,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMipsXor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMipsClz:
+ __ Clz(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMipsShl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -567,18 +576,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
- case kMipsFloat64Floor: {
+ case kMipsFloat64RoundDown: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
break;
}
- case kMipsFloat64Ceil: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
- break;
- }
case kMipsFloat64RoundTruncate: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
break;
}
+ case kMipsFloat64RoundUp: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ break;
+ }
case kMipsSqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
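
// kMipsFloat64Ceil becomes kMipsFloat64RoundUp so the arch opcodes mirror
// the machine-operator names (RoundDown/RoundUp/RoundTruncate). The three
// modes only differ for values with a fractional part; a standalone sketch
// of the semantics the floor_l_d/ceil_l_d/trunc_l_d sequences implement
// (not V8 code):

#include <cassert>
#include <cmath>

int main() {
  assert(std::floor(-1.5) == -2.0);  // RoundDown: toward -infinity
  assert(std::ceil(-1.5) == -1.0);   // RoundUp: toward +infinity
  assert(std::trunc(-1.5) == -1.0);  // RoundTruncate: toward zero
  assert(std::trunc(1.5) == 1.0);
  return 0;
}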
@@ -615,6 +624,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMipsFloat64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsFloat64ExtractHighWord32:
+ __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMipsFloat64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kMipsFloat64InsertHighWord32:
+ __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
// ... more basic instructions ...
case kMipsLbu:
@@ -646,7 +667,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kMipsSwc1: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
@@ -803,7 +824,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
@@ -917,9 +938,10 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
@@ -962,6 +984,8 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ lw(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
stack_slots -= frame()->GetOsrStackSlotCount();
}
@@ -1150,9 +1174,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
__ ldc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
__ sw(temp_0, dst0);
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 3aa508f9d5..82639baab9 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -26,6 +26,7 @@ namespace compiler {
V(MipsAnd) \
V(MipsOr) \
V(MipsXor) \
+ V(MipsClz) \
V(MipsShl) \
V(MipsShr) \
V(MipsSar) \
@@ -40,9 +41,9 @@ namespace compiler {
V(MipsDivD) \
V(MipsModD) \
V(MipsSqrtD) \
- V(MipsFloat64Floor) \
- V(MipsFloat64Ceil) \
+ V(MipsFloat64RoundDown) \
V(MipsFloat64RoundTruncate) \
+ V(MipsFloat64RoundUp) \
V(MipsCvtSD) \
V(MipsCvtDS) \
V(MipsTruncWD) \
@@ -61,6 +62,10 @@ namespace compiler {
V(MipsSwc1) \
V(MipsLdc1) \
V(MipsSdc1) \
+ V(MipsFloat64ExtractLowWord32) \
+ V(MipsFloat64ExtractHighWord32) \
+ V(MipsFloat64InsertLowWord32) \
+ V(MipsFloat64InsertHighWord32) \
V(MipsPush) \
V(MipsStoreToStackSlot) \
V(MipsStackClaim) \
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index d723453c01..0e8df3e448 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -114,9 +114,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -265,6 +264,12 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsClz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
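
// The new Word32Clz visitor maps the machine operator onto the single MIPS
// clz instruction, which counts leading zero bits and yields 32 for a zero
// input. A standalone sketch of the semantics (not V8 code):

#include <cassert>
#include <cstdint>

int Word32Clz(uint32_t x) {
  int n = 0;
  while (n < 32 && (x & 0x80000000u) == 0) {
    x <<= 1;
    ++n;
  }
  return n;
}

int main() {
  assert(Word32Clz(0u) == 32);
  assert(Word32Clz(1u) == 31);
  assert(Word32Clz(0x80000000u) == 0);
  return 0;
}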
void InstructionSelector::VisitInt32Add(Node* node) {
MipsOperandGenerator g(this);
@@ -402,6 +407,20 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ MipsOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kMipsFloat64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRR(this, kMipsSubD, node);
}
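
// The pattern above folds -0.0 - RoundDown(-0.0 - x) into a single RoundUp,
// using the identity ceil(x) == -floor(-x); the -0.0 operands implement
// negation correctly even for signed zeros, which a plain 0.0 - x would not.
// A standalone check of the identity (not V8 code):

#include <cassert>
#include <cmath>

int main() {
  const double samples[] = {-2.5, -2.0, -0.5, 0.0, 0.5, 2.0, 2.5};
  for (double x : samples) {
    assert(std::ceil(x) == -std::floor(-x));
  }
  return 0;
}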
@@ -423,19 +442,20 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
MipsOperandGenerator g(this);
Emit(kMipsSqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRR(this, kMipsFloat64Floor, node);
-}
-
-
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRR(this, kMipsFloat64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kMipsFloat64RoundDown, node);
}
@@ -449,7 +469,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
MipsOperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
@@ -476,6 +496,13 @@ void InstructionSelector::VisitCall(Node* node) {
slot--;
}
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler != nullptr) {
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -490,7 +517,7 @@ void InstructionSelector::VisitCall(Node* node) {
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(flags);
// Emit the call instruction.
InstructionOperand* first_output =
@@ -600,8 +627,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
// TODO(plind): Revisit and test this path.
@@ -730,8 +756,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -746,63 +771,31 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
-void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
- BasicBlock** case_branches,
- int32_t* case_values, size_t case_count,
- int32_t min_value, int32_t max_value) {
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
MipsOperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- InstructionOperand default_operand = g.Label(default_branch);
-
- // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
- // is 2^31-1, so don't assume that it's non-zero below.
- size_t value_range =
- 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
-
- // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
- // instruction.
- size_t table_space_cost = 9 + value_range;
- size_t table_time_cost = 9;
- size_t lookup_space_cost = 2 + 2 * case_count;
- size_t lookup_time_cost = case_count;
- if (case_count > 0 &&
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 9 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = value_operand;
- if (min_value) {
+ if (sw.min_value) {
index_operand = g.TempRegister();
- Emit(kMipsSub, index_operand, value_operand, g.TempImmediate(min_value));
- }
- size_t input_count = 2 + value_range;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = index_operand;
- std::fill(&inputs[1], &inputs[input_count], default_operand);
- for (size_t index = 0; index < case_count; ++index) {
- size_t value = case_values[index] - min_value;
- BasicBlock* branch = case_branches[index];
- DCHECK_LE(0u, value);
- DCHECK_LT(value + 2, input_count);
- inputs[value + 2] = g.Label(branch);
+ Emit(kMipsSub, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
}
- Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
- return;
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
- size_t input_count = 2 + case_count * 2;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = value_operand;
- inputs[1] = default_operand;
- for (size_t index = 0; index < case_count; ++index) {
- int32_t value = case_values[index];
- BasicBlock* branch = case_branches[index];
- inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
- inputs[index * 2 + 2 + 1] = g.Label(branch);
- }
- Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
+ return EmitLookupSwitch(sw, value_operand);
}
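
// The table-vs-lookup decision now comes from SwitchInfo; the heuristic
// weighs code size against a 3x-weighted dispatch-time estimate (and the
// code above additionally refuses a table when min_value is INT32_MIN, so
// the range arithmetic cannot overflow). A standalone sketch of the same
// decision rule, with the constants copied from above (not V8 code):

#include <cstddef>
#include <cstdio>

bool UseTableSwitch(size_t case_count, size_t value_range) {
  size_t table_space_cost = 9 + value_range;  // one entry per value in range
  size_t table_time_cost = 3;                 // constant-time dispatch
  size_t lookup_space_cost = 2 + 2 * case_count;
  size_t lookup_time_cost = case_count;       // linear chain of compares
  return case_count > 0 &&
         table_space_cost + 3 * table_time_cost <=
             lookup_space_cost + 3 * lookup_time_cost;
}

int main() {
  std::printf("%d\n", UseTableSwitch(4, 4));     // dense cases: table wins
  std::printf("%d\n", UseTableSwitch(4, 1000));  // sparse cases: lookup wins
  return 0;
}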
@@ -878,12 +871,43 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ MipsOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
}
return MachineOperatorBuilder::kNoFlags;
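
// The VisitFloat64Insert{Low,High}Word32 selectors above use
// DefineSameAsFirst because FmoveLow/FmoveHigh overwrite only half of the
// destination FP register: the other half must be preserved from the first
// input, so output and input 0 are constrained to the same register. A
// standalone sketch of that read-modify-write behaviour (not V8 code):

#include <cassert>
#include <cstdint>
#include <cstring>

double Float64InsertLowWord32(double x, uint32_t lo) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof(bits));
  bits = (bits & 0xFFFFFFFF00000000ull) | lo;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;
}

int main() {
  // The high half (sign, exponent, upper mantissa) of 1.0 is untouched.
  assert(Float64InsertLowWord32(1.0, 0u) == 1.0);
  assert(Float64InsertLowWord32(1.0, 1u) > 1.0);  // nudges the mantissa up
  return 0;
}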
diff --git a/deps/v8/src/compiler/mips/linkage-mips.cc b/deps/v8/src/compiler/mips/linkage-mips.cc
index cbb59d3a10..9480b73eae 100644
--- a/deps/v8/src/compiler/mips/linkage-mips.cc
+++ b/deps/v8/src/compiler/mips/linkage-mips.cc
@@ -51,9 +51,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties);
+ stack_parameter_count, flags, properties,
+ return_type);
}
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index 60e016fa22..f620487931 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -38,11 +38,11 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- FloatRegister OutputSingleRegister(int index = 0) {
+ FloatRegister OutputSingleRegister(size_t index = 0) {
return ToSingleRegister(instr_->OutputAt(index));
}
- FloatRegister InputSingleRegister(int index) {
+ FloatRegister InputSingleRegister(size_t index) {
return ToSingleRegister(instr_->InputAt(index));
}
@@ -52,7 +52,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
return ToDoubleRegister(op);
}
- Operand InputImmediate(int index) {
+ Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@@ -78,7 +78,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
return Operand(zero_reg);
}
- Operand InputOperand(int index) {
+ Operand InputOperand(size_t index) {
InstructionOperand* op = instr_->InputAt(index);
if (op->IsRegister()) {
return Operand(ToRegister(op));
@@ -86,8 +86,8 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
return InputImmediate(index);
}
- MemOperand MemoryOperand(int* first_index) {
- const int index = *first_index;
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
switch (AddressingModeField::decode(instr_->opcode())) {
case kMode_None:
break;
@@ -102,7 +102,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(no_reg);
}
- MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
MemOperand ToMemOperand(InstructionOperand* op) const {
DCHECK(op != NULL);
@@ -116,7 +116,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
};
-static inline bool HasRegisterInput(Instruction* instr, int index) {
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
@@ -408,7 +408,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchCallJSFunction: {
@@ -422,7 +422,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchJmp:
@@ -437,6 +437,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchNop:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
@@ -505,6 +511,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kMips64Xor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
+ case kMips64Clz:
+ __ Clz(i.OutputRegister(), i.InputRegister(0));
+ break;
case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
@@ -580,11 +589,9 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
case kMips64Tst:
- case kMips64Tst32:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kMips64Cmp:
- case kMips64Cmp32:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
case kMips64Mov:
@@ -631,18 +638,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
- case kMips64Float64Floor: {
+ case kMips64Float64RoundDown: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
break;
}
- case kMips64Float64Ceil: {
- ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
- break;
- }
case kMips64Float64RoundTruncate: {
ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
break;
}
+ case kMips64Float64RoundUp: {
+ ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+ break;
+ }
case kMips64SqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
@@ -679,6 +686,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
break;
}
+ case kMips64Float64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMips64Float64ExtractHighWord32:
+ __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kMips64Float64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kMips64Float64InsertHighWord32:
+ __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
// ... more basic instructions ...
case kMips64Lbu:
@@ -716,7 +735,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
break;
}
case kMips64Swc1: {
- int index = 0;
+ size_t index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
@@ -808,25 +827,13 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  // implemented differently than on the other architectures. The compare
  // operations emit MIPS pseudo-instructions, which are handled here by branch
// instructions that do the actual comparison. Essential that the input
- // registers to compare psuedo-op are not modified before this branch op, as
+ // registers to compare pseudo-op are not modified before this branch op, as
// they are tested here.
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
if (instr->arch_opcode() == kMips64Tst) {
cc = FlagsConditionToConditionTst(branch->condition);
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
- } else if (instr->arch_opcode() == kMips64Tst32) {
- cc = FlagsConditionToConditionTst(branch->condition);
- // Zero-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- // This is a disadvantage to perform 32-bit operation on MIPS64.
- // Try to force globally in front-end Word64 representation to be preferred
- // for MIPS64 even for Word32.
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Dext(at, at, 0, 32);
- __ Branch(tlabel, cc, at, Operand(zero_reg));
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
cc = FlagsConditionToConditionOvf(branch->condition);
@@ -839,42 +846,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
-
- } else if (instr->arch_opcode() == kMips64Cmp32) {
- cc = FlagsConditionToConditionCmp(branch->condition);
-
- switch (branch->condition) {
- case kEqual:
- case kNotEqual:
- case kSignedLessThan:
- case kSignedGreaterThanOrEqual:
- case kSignedLessThanOrEqual:
- case kSignedGreaterThan:
- // Sign-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ sll(i.InputRegister(0), i.InputRegister(0), 0);
- if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(1), i.InputRegister(1), 0);
- }
- break;
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- case kUnsignedLessThanOrEqual:
- case kUnsignedGreaterThan:
- // Zero-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
- if (instr->InputAt(1)->IsRegister()) {
- __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
- }
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp, branch->condition);
- break;
- }
- __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
-
- if (!branch->fallthru) __ Branch(flabel); // no fallthru to flabel.
} else if (instr->arch_opcode() == kMips64CmpD) {
  // TODO(dusmil) optimize unordered checks to use fewer instructions
// even if we have to unfold BranchF macro.
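
// kMips64Tst32/kMips64Cmp32 and the per-condition sign/zero-extension dance
// above are gone. MIPS64 only offers 64-bit compare-and-branch, but as long
// as 32-bit values are kept sign-extended in their 64-bit registers, one
// plain 64-bit compare preserves both the signed and the unsigned 32-bit
// ordering, so a dedicated 32-bit compare opcode is unnecessary. A
// standalone check of that property (my reading of the change; not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t samples[] = {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
  for (uint32_t a : samples) {
    for (uint32_t b : samples) {
      int64_t sa = static_cast<int32_t>(a);  // sign-extended canonical form
      int64_t sb = static_cast<int32_t>(b);
      // Signed 32-bit order is preserved by a 64-bit signed compare...
      assert((static_cast<int32_t>(a) < static_cast<int32_t>(b)) == (sa < sb));
      // ...and unsigned 32-bit order by a 64-bit unsigned compare.
      assert((a < b) ==
             (static_cast<uint64_t>(sa) < static_cast<uint64_t>(sb)));
    }
  }
  return 0;
}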
@@ -917,7 +888,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
}
@@ -948,14 +919,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
- } else if (instr->arch_opcode() == kMips64Tst32) {
- cc = FlagsConditionToConditionTst(condition);
- // Zero-extend register on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ And(at, i.InputRegister(0), i.InputOperand(1));
- __ Dext(at, at, 0, 32);
- __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
- __ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64Dadd ||
instr->arch_opcode() == kMips64Dsub) {
cc = FlagsConditionToConditionOvf(condition);
@@ -969,42 +932,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = FlagsConditionToConditionCmp(condition);
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
- } else if (instr->arch_opcode() == kMips64Cmp32) {
- Register left = i.InputRegister(0);
- Operand right = i.InputOperand(1);
- cc = FlagsConditionToConditionCmp(condition);
-
- switch (condition) {
- case kEqual:
- case kNotEqual:
- case kSignedLessThan:
- case kSignedGreaterThanOrEqual:
- case kSignedLessThanOrEqual:
- case kSignedGreaterThan:
- // Sign-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ sll(left, left, 0);
- if (instr->InputAt(1)->IsRegister()) {
- __ sll(i.InputRegister(1), i.InputRegister(1), 0);
- }
- break;
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- case kUnsignedLessThanOrEqual:
- case kUnsignedGreaterThan:
- // Zero-extend registers on MIPS64 only 64-bit operand
- // branch and compare op. is available.
- __ Dext(left, left, 0, 32);
- if (instr->InputAt(1)->IsRegister()) {
- __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
- }
- break;
- default:
- UNSUPPORTED_COND(kMips64Cmp32, condition);
- break;
- }
- __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
- __ li(result, Operand(1)); // In delay slot.
} else if (instr->arch_opcode() == kMips64CmpD) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
@@ -1077,9 +1004,10 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
@@ -1122,6 +1050,8 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
stack_slots -= frame()->GetOsrStackSlotCount();
}
@@ -1310,9 +1240,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
Register temp_0 = kScratchReg;
FPURegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
- MemOperand src1(src0.rm(), src0.offset() + kPointerSize);
+ MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
- MemOperand dst1(dst0.rm(), dst0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
__ ldc1(temp_1, dst0); // Save destination in temp_1.
__ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
__ sw(temp_0, dst0);
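
// On MIPS64 kPointerSize is 8, but the two words of an FP64 stack slot are
// moved with 32-bit lw/sw, so the second word lives at byte offset 4
// (kIntSize); the old +kPointerSize offset would have addressed the next
// slot instead. (On 32-bit MIPS the two constants coincide, so the matching
// change there is purely clarifying.) A standalone sketch of the
// word-by-word swap (not V8 code):

#include <cassert>
#include <cstdint>
#include <cstring>

void SwapSlots(unsigned char* a, unsigned char* b) {
  for (int offset = 0; offset < 8; offset += 4) {  // 4 == kIntSize
    uint32_t wa, wb;
    std::memcpy(&wa, a + offset, 4);
    std::memcpy(&wb, b + offset, 4);
    std::memcpy(a + offset, &wb, 4);
    std::memcpy(b + offset, &wa, 4);
  }
}

int main() {
  double x = 1.5, y = -2.5;
  SwapSlots(reinterpret_cast<unsigned char*>(&x),
            reinterpret_cast<unsigned char*>(&y));
  assert(x == -2.5 && y == 1.5);
  return 0;
}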
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index dd019f9e5a..b184018bd7 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -11,74 +11,77 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64Mul) \
- V(Mips64MulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64Or) \
- V(Mips64Xor) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Dext) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Tst32) \
- V(Mips64Cmp) \
- V(Mips64Cmp32) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64SqrtD) \
- V(Mips64Float64Floor) \
- V(Mips64Float64Ceil) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64TruncUwD) \
- V(Mips64CvtDW) \
- V(Mips64CvtDUw) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Lhu) \
- V(Mips64Sh) \
- V(Mips64Ld) \
- V(Mips64Lw) \
- V(Mips64Sw) \
- V(Mips64Sd) \
- V(Mips64Lwc1) \
- V(Mips64Swc1) \
- V(Mips64Ldc1) \
- V(Mips64Sdc1) \
- V(Mips64Push) \
- V(Mips64StoreToStackSlot) \
- V(Mips64StackClaim) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64Mul) \
+ V(Mips64MulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64Or) \
+ V(Mips64Xor) \
+ V(Mips64Clz) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Dext) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64SqrtD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64TruncUwD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtDUw) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Lhu) \
+ V(Mips64Sh) \
+ V(Mips64Ld) \
+ V(Mips64Lw) \
+ V(Mips64Sw) \
+ V(Mips64Sd) \
+ V(Mips64Lwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Push) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64StackClaim) \
V(Mips64StoreWriteBarrier)
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 779f786468..a0a6e22703 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -57,35 +57,6 @@ class Mips64OperandGenerator FINAL : public OperandGenerator {
}
}
-
- bool CanBeImmediate(Node* node, InstructionCode opcode,
- FlagsContinuation* cont) {
- int64_t value;
- if (node->opcode() == IrOpcode::kInt32Constant)
- value = OpParameter<int32_t>(node);
- else if (node->opcode() == IrOpcode::kInt64Constant)
- value = OpParameter<int64_t>(node);
- else
- return false;
- switch (ArchOpcodeField::decode(opcode)) {
- case kMips64Cmp32:
- switch (cont->condition()) {
- case kUnsignedLessThan:
- case kUnsignedGreaterThanOrEqual:
- case kUnsignedLessThanOrEqual:
- case kUnsignedGreaterThan:
- // Immediate operands for unsigned 32-bit compare operations
- // should not be sign-extended.
- return is_uint15(value);
- default:
- return false;
- }
- default:
- return is_int16(value);
- }
- }
-
-
private:
bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
TRACE_UNIMPL();
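
// With kMips64Cmp32 gone, the FlagsContinuation-aware CanBeImmediate
// overload above is unnecessary: the is_uint15 case existed only because
// immediates for unsigned 32-bit compares must not change under sign
// extension. Hedged standalone sketches of the two range predicates the
// code relies on (V8 defines these in its utilities; not copied from there):

#include <cassert>
#include <cstdint>

constexpr bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }
constexpr bool is_uint15(int64_t v) { return v >= 0 && v <= 32767; }

int main() {
  static_assert(is_int16(-32768) && !is_int16(32768), "16-bit signed range");
  static_assert(is_uint15(32767) && !is_uint15(-1), "15-bit unsigned range");
  assert(is_int16(100) && is_uint15(100));
  return 0;
}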
@@ -147,9 +118,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -334,6 +304,12 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitWord64Ror(Node* node) {
VisitRRO(this, kMips64Dror, node);
}
@@ -581,6 +557,20 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
+ Mips64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kMips64Float64RoundUp, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRR(this, kMips64SubD, node);
}
@@ -603,19 +593,20 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
+void InstructionSelector::VisitFloat64Max(Node* node) { UNREACHABLE(); }
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) { UNREACHABLE(); }
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
Mips64OperandGenerator g(this);
Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRR(this, kMips64Float64Floor, node);
-}
-
-
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRR(this, kMips64Float64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kMips64Float64RoundDown, node);
}
@@ -629,7 +620,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
Mips64OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
@@ -656,6 +647,13 @@ void InstructionSelector::VisitCall(Node* node) {
slot--;
}
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler != nullptr) {
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -670,7 +668,7 @@ void InstructionSelector::VisitCall(Node* node) {
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(flags);
// Emit the call instruction.
Instruction* call_instr =
@@ -779,8 +777,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -808,10 +805,10 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Node* right = node->InputAt(1);
// Match immediates on left or right side of comparison.
- if (g.CanBeImmediate(right, opcode, cont)) {
+ if (g.CanBeImmediate(right, opcode)) {
VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
cont);
- } else if (g.CanBeImmediate(left, opcode, cont)) {
+ } else if (g.CanBeImmediate(left, opcode)) {
if (!commutative) cont->Commute();
VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
cont);
@@ -824,7 +821,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
- VisitWordCompare(selector, node, kMips64Cmp32, cont, false);
+ VisitWordCompare(selector, node, kMips64Cmp, cont, false);
}
@@ -836,15 +833,14 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
} // namespace
-void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
- Node* value, FlagsContinuation* cont) {
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
Mips64OperandGenerator g(selector);
- opcode = cont->Encode(opcode);
+ InstructionCode opcode = cont->Encode(kMips64Cmp);
InstructionOperand const value_operand = g.UseRegister(value);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
g.TempImmediate(0));
@@ -855,13 +851,7 @@ void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
// Shared routine for word comparisons against zero.
void VisitWordCompareZero(InstructionSelector* selector, Node* user,
Node* value, FlagsContinuation* cont) {
- // Initially set comparison against 0 to be 64-bit variant for branches that
- // cannot combine.
- InstructionCode opcode = kMips64Cmp;
while (selector->CanCover(user, value)) {
- if (user->opcode() == IrOpcode::kWord32Equal) {
- opcode = kMips64Cmp32;
- }
switch (value->opcode()) {
case IrOpcode::kWord32Equal: {
// Combine with comparisons against 0 by simply inverting the
@@ -871,7 +861,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
user = value;
value = m.left().node();
cont->Negate();
- opcode = kMips64Cmp32;
continue;
}
cont->OverwriteAndNegateIfEqual(kEqual);
@@ -946,7 +935,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
break;
case IrOpcode::kWord32And:
- return VisitWordCompare(selector, value, kMips64Tst32, cont, true);
case IrOpcode::kWord64And:
return VisitWordCompare(selector, value, kMips64Tst, cont, true);
default:
@@ -956,7 +944,7 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
}
// Continuation could not be combined with a compare, emit compare against 0.
- EmitWordCompareZero(selector, opcode, value, cont);
+ EmitWordCompareZero(selector, value, cont);
}
@@ -967,64 +955,31 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
-void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
- BasicBlock** case_branches,
- int32_t* case_values, size_t case_count,
- int32_t min_value, int32_t max_value) {
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
Mips64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- InstructionOperand default_operand = g.Label(default_branch);
-
- // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
- // is 2^31-1, so don't assume that it's non-zero below.
- size_t value_range =
- 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
-
- // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
- // instruction.
- size_t table_space_cost = 10 + 2 * value_range;
- size_t table_time_cost = 10;
- size_t lookup_space_cost = 2 + 2 * case_count;
- size_t lookup_time_cost = case_count;
- if (case_count > 0 &&
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 10 + 2 * sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = value_operand;
- if (min_value) {
+ if (sw.min_value) {
index_operand = g.TempRegister();
Emit(kMips64Sub, index_operand, value_operand,
- g.TempImmediate(min_value));
+ g.TempImmediate(sw.min_value));
}
- size_t input_count = 2 + value_range;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = index_operand;
- std::fill(&inputs[1], &inputs[input_count], default_operand);
- for (size_t index = 0; index < case_count; ++index) {
- size_t value = case_values[index] - min_value;
- BasicBlock* branch = case_branches[index];
- DCHECK_LE(0u, value);
- DCHECK_LT(value + 2, input_count);
- inputs[value + 2] = g.Label(branch);
- }
- Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
- return;
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
- size_t input_count = 2 + case_count * 2;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = value_operand;
- inputs[1] = default_operand;
- for (size_t index = 0; index < case_count; ++index) {
- int32_t value = case_values[index];
- BasicBlock* branch = case_branches[index];
- inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
- inputs[index * 2 + 2 + 1] = g.Label(branch);
- }
- Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
+ return EmitLookupSwitch(sw, value_operand);
}
@@ -1130,11 +1085,42 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Float64ExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Emit(kMips64Float64ExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMips64Float64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Mips64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kMips64Float64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ return MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate;
}
diff --git a/deps/v8/src/compiler/mips64/linkage-mips64.cc b/deps/v8/src/compiler/mips64/linkage-mips64.cc
index 273054e206..6fed0617d0 100644
--- a/deps/v8/src/compiler/mips64/linkage-mips64.cc
+++ b/deps/v8/src/compiler/mips64/linkage-mips64.cc
@@ -51,9 +51,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties);
+ stack_parameter_count, flags, properties,
+ return_type);
}
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
index f4e0513775..2c4c720d26 100644
--- a/deps/v8/src/compiler/move-optimizer.cc
+++ b/deps/v8/src/compiler/move-optimizer.cc
@@ -10,31 +10,9 @@ namespace compiler {
namespace {
-MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move,
- Zone* zone) {
- auto move_ops = left->move_operands();
- MoveOperands* replacement = nullptr;
- MoveOperands* to_eliminate = nullptr;
- for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
- if (curr->IsEliminated()) continue;
- if (curr->destination()->Equals(move->source())) {
- DCHECK(!replacement);
- replacement = curr;
- if (to_eliminate != nullptr) break;
- } else if (curr->destination()->Equals(move->destination())) {
- DCHECK(!to_eliminate);
- to_eliminate = curr;
- if (replacement != nullptr) break;
- }
- }
- DCHECK(!(replacement == to_eliminate && replacement != nullptr));
- if (replacement != nullptr) {
- auto new_source = InstructionOperand::New(
- zone, replacement->source()->kind(), replacement->source()->index());
- move->set_source(new_source);
- }
- return to_eliminate;
-}
+typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
+typedef ZoneMap<MoveKey, unsigned> MoveMap;
+typedef ZoneSet<InstructionOperand> OperandSet;
bool GapsCanMoveOver(Instruction* instr) {
@@ -75,6 +53,10 @@ void MoveOptimizer::Run() {
for (auto* block : code()->instruction_blocks()) {
CompressBlock(block);
}
+ for (auto block : code()->instruction_blocks()) {
+ if (block->PredecessorCount() <= 1) continue;
+ OptimizeMerge(block);
+ }
for (auto gap : to_finalize_) {
FinalizeMoves(gap);
}
@@ -90,7 +72,7 @@ void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
// merging the two gaps.
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
- MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
+ auto to_eliminate = left->PrepareInsertAfter(op);
if (to_eliminate != nullptr) eliminated->push_back(to_eliminate);
}
// Eliminate dead moves. Must happen before insertion of new moves as the
@@ -157,6 +139,107 @@ void MoveOptimizer::CompressBlock(InstructionBlock* block) {
}
+GapInstruction* MoveOptimizer::LastGap(InstructionBlock* block) {
+ int gap_index = block->last_instruction_index() - 1;
+ auto instr = code()->instructions()[gap_index];
+ return GapInstruction::cast(instr);
+}
+
+
+void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
+ DCHECK(block->PredecessorCount() > 1);
+  // Ensure that the last instruction in all incoming blocks doesn't contain
+ // things that would prevent moving gap moves across them.
+ for (auto pred_index : block->predecessors()) {
+ auto pred = code()->InstructionBlockAt(pred_index);
+ auto last_instr = code()->instructions()[pred->last_instruction_index()];
+ DCHECK(!last_instr->IsGapMoves());
+ if (last_instr->IsSourcePosition()) continue;
+ if (last_instr->IsCall()) return;
+ if (last_instr->TempCount() != 0) return;
+ if (last_instr->OutputCount() != 0) return;
+ for (size_t i = 0; i < last_instr->InputCount(); ++i) {
+ auto op = last_instr->InputAt(i);
+ if (!op->IsConstant() && !op->IsImmediate()) return;
+ }
+ }
+ // TODO(dcarney): pass a ZonePool down for this?
+ MoveMap move_map(local_zone());
+ size_t correct_counts = 0;
+ // Accumulate set of shared moves.
+ for (auto pred_index : block->predecessors()) {
+ auto pred = code()->InstructionBlockAt(pred_index);
+ auto gap = LastGap(pred);
+ if (gap->parallel_moves()[0] == nullptr ||
+ gap->parallel_moves()[0]->move_operands()->is_empty()) {
+ return;
+ }
+ auto move_ops = gap->parallel_moves()[0]->move_operands();
+ for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+ if (op->IsRedundant()) continue;
+ auto src = *op->source();
+ auto dst = *op->destination();
+ MoveKey key = {src, dst};
+ auto res = move_map.insert(std::make_pair(key, 1));
+ if (!res.second) {
+ res.first->second++;
+ if (res.first->second == block->PredecessorCount()) {
+ correct_counts++;
+ }
+ }
+ }
+ }
+ if (move_map.empty() || correct_counts != move_map.size()) return;
+ // Find insertion point.
+ GapInstruction* gap = nullptr;
+ for (int i = block->first_instruction_index();
+ i <= block->last_instruction_index(); ++i) {
+ auto instr = code()->instructions()[i];
+ if (instr->IsGapMoves()) {
+ gap = GapInstruction::cast(instr);
+ continue;
+ }
+ if (!GapsCanMoveOver(instr)) break;
+ }
+ DCHECK(gap != nullptr);
+ bool gap_initialized = true;
+ if (gap->parallel_moves()[0] == nullptr ||
+ gap->parallel_moves()[0]->move_operands()->is_empty()) {
+ to_finalize_.push_back(gap);
+ } else {
+ // Will compress after insertion.
+ gap_initialized = false;
+ std::swap(gap->parallel_moves()[0], gap->parallel_moves()[1]);
+ }
+ auto move = gap->GetOrCreateParallelMove(
+ static_cast<GapInstruction::InnerPosition>(0), code_zone());
+ // Delete relevant entries in predecessors and move everything to block.
+ bool first_iteration = true;
+ for (auto pred_index : block->predecessors()) {
+ auto pred = code()->InstructionBlockAt(pred_index);
+ auto gap = LastGap(pred);
+ auto move_ops = gap->parallel_moves()[0]->move_operands();
+ for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+ if (op->IsRedundant()) continue;
+ MoveKey key = {*op->source(), *op->destination()};
+ auto it = move_map.find(key);
+ USE(it);
+ DCHECK(it != move_map.end());
+ if (first_iteration) {
+ move->AddMove(op->source(), op->destination(), code_zone());
+ }
+ op->Eliminate();
+ }
+ first_iteration = false;
+ }
+ // Compress.
+ if (!gap_initialized) {
+ CompressMoves(&temp_vector_0(), gap->parallel_moves()[0],
+ gap->parallel_moves()[1]);
+ }
+}
+
+
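
// OptimizeMerge above hoists the final gap moves of a merge block's
// predecessors into the block itself, but only when every predecessor ends
// with exactly the same set of (source, destination) moves and no
// predecessor ends in an instruction that could clobber them. A standalone
// model of the counting step (assumes no duplicate moves within one gap;
// not V8 code):

#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <utility>
#include <vector>

using Move = std::pair<std::string, std::string>;  // source -> destination

bool AllMovesShared(const std::vector<std::vector<Move>>& preds) {
  std::map<Move, size_t> counts;
  for (const auto& moves : preds)
    for (const auto& m : moves) ++counts[m];
  for (const auto& entry : counts)
    if (entry.second != preds.size()) return false;  // not common to all
  return !counts.empty();
}

int main() {
  std::vector<std::vector<Move>> same = {{{"r1", "r2"}}, {{"r1", "r2"}}};
  std::vector<std::vector<Move>> diff = {{{"r1", "r2"}}, {{"r3", "r4"}}};
  assert(AllMovesShared(same));
  assert(!AllMovesShared(diff));
  return 0;
}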
// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(GapInstruction* gap) {
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
index 2bde09eae5..e5fa1258e0 100644
--- a/deps/v8/src/compiler/move-optimizer.h
+++ b/deps/v8/src/compiler/move-optimizer.h
@@ -30,6 +30,8 @@ class MoveOptimizer FINAL {
void CompressBlock(InstructionBlock* block);
void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
ParallelMove* right);
+ GapInstruction* LastGap(InstructionBlock* block);
+ void OptimizeMerge(InstructionBlock* block);
void FinalizeMoves(GapInstruction* gap);
Zone* const local_zone_;
diff --git a/deps/v8/src/compiler/node-matchers.cc b/deps/v8/src/compiler/node-matchers.cc
new file mode 100644
index 0000000000..c6ae39000c
--- /dev/null
+++ b/deps/v8/src/compiler/node-matchers.cc
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+bool NodeMatcher::IsComparison() const {
+ return IrOpcode::IsComparisonOpcode(opcode());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index fc11a0a8cf..48ff3d7323 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -7,6 +7,8 @@
#include <cmath>
+// TODO(turbofan): Move ExternalReference out of assembler.h
+#include "src/assembler.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
#include "src/unique.h"
@@ -28,6 +30,8 @@ struct NodeMatcher {
}
Node* InputAt(int index) const { return node()->InputAt(index); }
+ bool IsComparison() const;
+
#define DEFINE_IS_OPCODE(Opcode) \
bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; }
ALL_OP_LIST(DEFINE_IS_OPCODE)
@@ -153,6 +157,32 @@ struct HeapObjectMatcher FINAL
};
+// A pattern matcher for external reference constants.
+struct ExternalReferenceMatcher FINAL
+ : public ValueMatcher<ExternalReference, IrOpcode::kExternalConstant> {
+ explicit ExternalReferenceMatcher(Node* node)
+ : ValueMatcher<ExternalReference, IrOpcode::kExternalConstant>(node) {}
+};
+
+
+// For shorter pattern matching code, this struct matches the inputs to
+// machine-level load operations.
+template <typename Object>
+struct LoadMatcher : public NodeMatcher {
+ explicit LoadMatcher(Node* node)
+ : NodeMatcher(node), object_(InputAt(0)), index_(InputAt(1)) {}
+
+ typedef Object ObjectMatcher;
+
+ Object const& object() const { return object_; }
+ IntPtrMatcher const& index() const { return index_; }
+
+ private:
+ Object const object_;
+ IntPtrMatcher const index_;
+};
+
+
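
// LoadMatcher fixes input 0 of a load as the "object" (matched by a
// caller-chosen matcher type) and input 1 as the "index". A self-contained
// miniature of the same idiom over a toy node type (all names here are
// illustrative, not V8's; the real index matcher is an IntPtrMatcher):

#include <cassert>

struct ToyNode {
  enum Opcode { kConstant, kLoad } opcode;
  int value;           // payload for kConstant
  ToyNode* inputs[2];  // {object, index} for kLoad
};

struct ToyConstantMatcher {
  explicit ToyConstantMatcher(ToyNode* node) : node_(node) {}
  bool HasValue() const { return node_->opcode == ToyNode::kConstant; }
  int Value() const { return node_->value; }
 private:
  ToyNode* node_;
};

template <typename Object>
struct ToyLoadMatcher {
  explicit ToyLoadMatcher(ToyNode* node)
      : object_(node->inputs[0]), index_(node->inputs[1]) {}
  const Object& object() const { return object_; }
  const Object& index() const { return index_; }
 private:
  Object object_;
  Object index_;
};

int main() {
  ToyNode base = {ToyNode::kConstant, 42, {nullptr, nullptr}};
  ToyNode index = {ToyNode::kConstant, 0, {nullptr, nullptr}};
  ToyNode load = {ToyNode::kLoad, 0, {&base, &index}};
  ToyLoadMatcher<ToyConstantMatcher> m(&load);
  assert(m.object().HasValue() && m.object().Value() == 42);
  assert(m.index().Value() == 0);
  return 0;
}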
// For shorter pattern matching code, this struct matches both the left and
// right hand sides of a binary operation and can put constants on the right
// if they appear on the left hand side of a commutative operation.
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index 47de74e329..8956915a2c 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/node-properties.h"
-
#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
namespace v8 {
@@ -58,9 +58,9 @@ Node* NodeProperties::GetContextInput(Node* node) {
// static
-Node* NodeProperties::GetFrameStateInput(Node* node) {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
- return node->InputAt(FirstFrameStateIndex(node));
+Node* NodeProperties::GetFrameStateInput(Node* node, int index) {
+ DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
+ return node->InputAt(FirstFrameStateIndex(node) + index);
}
@@ -138,9 +138,10 @@ void NodeProperties::ReplaceEffectInput(Node* node, Node* effect, int index) {
// static
-void NodeProperties::ReplaceFrameStateInput(Node* node, Node* frame_state) {
- DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
- node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
+void NodeProperties::ReplaceFrameStateInput(Node* node, int index,
+ Node* frame_state) {
+ DCHECK_LT(index, OperatorProperties::GetFrameStateInputCount(node->op()));
+ node->ReplaceInput(FirstFrameStateIndex(node) + index, frame_state);
}
@@ -150,16 +151,40 @@ void NodeProperties::RemoveNonValueInputs(Node* node) {
}
+void NodeProperties::MergeControlToEnd(Graph* graph,
+ CommonOperatorBuilder* common,
+ Node* node) {
+ // Connect the node to the merge exiting the graph.
+ Node* end_pred = NodeProperties::GetControlInput(graph->end());
+ if (end_pred->opcode() == IrOpcode::kMerge) {
+ int inputs = end_pred->op()->ControlInputCount() + 1;
+ end_pred->AppendInput(graph->zone(), node);
+ end_pred->set_op(common->Merge(inputs));
+ } else {
+ Node* merge = graph->NewNode(common->Merge(2), end_pred, node);
+ NodeProperties::ReplaceControlInput(graph->end(), merge);
+ }
+}
+
+
// static
-void NodeProperties::ReplaceWithValue(Node* node, Node* value, Node* effect) {
- DCHECK(node->op()->ControlOutputCount() == 0);
+void NodeProperties::ReplaceWithValue(Node* node, Node* value, Node* effect,
+ Node* control) {
if (!effect && node->op()->EffectInputCount() > 0) {
effect = NodeProperties::GetEffectInput(node);
}
+ if (control == nullptr && node->op()->ControlInputCount() > 0) {
+ control = NodeProperties::GetControlInput(node);
+ }
- // Requires distinguishing between value and effect edges.
+ // Requires distinguishing between value, effect and control edges.
for (Edge edge : node->use_edges()) {
- if (IsEffectEdge(edge)) {
+ if (IsControlEdge(edge)) {
+ DCHECK_EQ(IrOpcode::kIfSuccess, edge.from()->opcode());
+ DCHECK_NOT_NULL(control);
+ edge.from()->ReplaceUses(control);
+ edge.UpdateTo(NULL);
+ } else if (IsEffectEdge(edge)) {
DCHECK_NOT_NULL(effect);
edge.UpdateTo(effect);
} else {
@@ -185,16 +210,13 @@ Node* NodeProperties::FindProjection(Node* node, size_t projection_index) {
void NodeProperties::CollectControlProjections(Node* node, Node** projections,
size_t projection_count) {
#ifdef DEBUG
- DCHECK_EQ(static_cast<int>(projection_count), node->UseCount());
+ DCHECK_LE(static_cast<int>(projection_count), node->UseCount());
std::memset(projections, 0, sizeof(*projections) * projection_count);
#endif
size_t if_value_index = 0;
for (Node* const use : node->uses()) {
size_t index;
switch (use->opcode()) {
- default:
- UNREACHABLE();
- // Fall through.
case IrOpcode::kIfTrue:
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
index = 0;
@@ -203,6 +225,14 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
DCHECK_EQ(IrOpcode::kBranch, node->opcode());
index = 1;
break;
+ case IrOpcode::kIfSuccess:
+ DCHECK_EQ(IrOpcode::kCall, node->opcode());
+ index = 0;
+ break;
+ case IrOpcode::kIfException:
+ DCHECK_EQ(IrOpcode::kCall, node->opcode());
+ index = 1;
+ break;
case IrOpcode::kIfValue:
DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
index = if_value_index++;
@@ -211,6 +241,8 @@ void NodeProperties::CollectControlProjections(Node* node, Node** projections,
DCHECK_EQ(IrOpcode::kSwitch, node->opcode());
index = projection_count - 1;
break;
+ default:
+ continue;
}
DCHECK_LT(if_value_index, projection_count);
DCHECK_LT(index, projection_count);
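
The MergeControlToEnd helper added above either widens the Merge that already feeds End or splices in a fresh Merge(2). A minimal standalone model of that rewiring, with toy types (string mnemonics and a plain input vector stand in for V8's Operator and input records):

    #include <memory>
    #include <string>
    #include <vector>

    // Toy node: an opcode mnemonic plus ordered inputs (not V8 types).
    struct Node {
      std::string op;
      std::vector<Node*> inputs;
    };

    // Route {node} into the merge feeding {end}: widen an existing
    // Merge in place, or splice in a new two-input Merge.
    void MergeControlToEnd(std::vector<std::unique_ptr<Node>>* graph,
                           Node* end, Node* node) {
      Node* pred = end->inputs[0];  // End's single control input.
      if (pred->op == "Merge") {
        pred->inputs.push_back(node);  // Merge(n) -> Merge(n+1).
      } else {
        graph->push_back(std::unique_ptr<Node>(
            new Node{"Merge", std::vector<Node*>{pred, node}}));
        end->inputs[0] = graph->back().get();
      }
    }

The real helper must additionally rebuild the operator via common->Merge(inputs), because in V8 the input count is baked into the Merge operator itself.
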
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index a13eea3a02..350e083017 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -12,7 +12,9 @@ namespace v8 {
namespace internal {
namespace compiler {
+class Graph;
class Operator;
+class CommonOperatorBuilder;
// A facade that simplifies access to the different kinds of inputs to a node.
class NodeProperties FINAL {
@@ -39,7 +41,7 @@ class NodeProperties FINAL {
static Node* GetValueInput(Node* node, int index);
static Node* GetContextInput(Node* node);
- static Node* GetFrameStateInput(Node* node);
+ static Node* GetFrameStateInput(Node* node, int index);
static Node* GetEffectInput(Node* node, int index = 0);
static Node* GetControlInput(Node* node, int index = 0);
@@ -77,13 +79,19 @@ class NodeProperties FINAL {
static void ReplaceContextInput(Node* node, Node* context);
static void ReplaceControlInput(Node* node, Node* control);
static void ReplaceEffectInput(Node* node, Node* effect, int index = 0);
- static void ReplaceFrameStateInput(Node* node, Node* frame_state);
+ static void ReplaceFrameStateInput(Node* node, int index, Node* frame_state);
static void RemoveNonValueInputs(Node* node);
- // Replace value uses of {node} with {value} and effect uses of {node} with
- // {effect}. If {effect == NULL}, then use the effect input to {node}.
- static void ReplaceWithValue(Node* node, Node* value, Node* effect = nullptr);
+ // Merge the control node {node} into the end of the graph, introducing a
+ // merge node or expanding an existing merge node if necessary.
+ static void MergeControlToEnd(Graph* graph, CommonOperatorBuilder* common,
+ Node* node);
+ // Replace value uses of {node} with {value} and effect uses of {node} with
+ // {effect}. If {effect == NULL}, then use the effect input to {node}. All
+ // control uses will be relaxed assuming {node} cannot throw.
+ static void ReplaceWithValue(Node* node, Node* value, Node* effect = nullptr,
+ Node* control = nullptr);
// ---------------------------------------------------------------------------
// Miscellaneous utilities.
@@ -91,8 +99,9 @@ class NodeProperties FINAL {
static Node* FindProjection(Node* node, size_t projection_index);
// Collect the branch-related projections from a node, such as IfTrue,
- // IfFalse, IfValue and IfDefault.
+ // IfFalse, IfSuccess, IfException, IfValue and IfDefault.
// - Branch: [ IfTrue, IfFalse ]
+ // - Call : [ IfSuccess, IfException ]
// - Switch: [ IfValue, ..., IfDefault ]
static void CollectControlProjections(Node* node, Node** proj, size_t count);
diff --git a/deps/v8/src/compiler/node.cc b/deps/v8/src/compiler/node.cc
index d38e9ceff7..1a9c326f20 100644
--- a/deps/v8/src/compiler/node.cc
+++ b/deps/v8/src/compiler/node.cc
@@ -40,7 +40,7 @@ Node* Node::New(Zone* zone, NodeId id, const Operator* op, int input_count,
void Node::Kill() {
DCHECK_NOT_NULL(op());
- RemoveAllInputs();
+ NullAllInputs();
DCHECK(uses().empty());
}
@@ -89,7 +89,7 @@ void Node::RemoveInput(int index) {
}
-void Node::RemoveAllInputs() {
+void Node::NullAllInputs() {
for (Edge edge : input_edges()) edge.UpdateTo(nullptr);
}
@@ -118,33 +118,23 @@ int Node::UseCount() const {
}
-Node* Node::UseAt(int index) const {
- DCHECK_LE(0, index);
- DCHECK_LT(index, UseCount());
- const Use* use = first_use_;
- while (index-- != 0) {
- use = use->next;
- }
- return use->from;
-}
-
+void Node::ReplaceUses(Node* that) {
+ DCHECK(this->first_use_ == nullptr || this->first_use_->prev == nullptr);
+ DCHECK(that->first_use_ == nullptr || that->first_use_->prev == nullptr);
-void Node::ReplaceUses(Node* replace_to) {
- for (Use* use = first_use_; use; use = use->next) {
- use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+ // Update the pointers to {this} to point to {that}.
+ Use* last_use = nullptr;
+ for (Use* use = this->first_use_; use; use = use->next) {
+ use->from->GetInputRecordPtr(use->input_index)->to = that;
+ last_use = use;
}
- if (!replace_to->last_use_) {
- DCHECK(!replace_to->first_use_);
- replace_to->first_use_ = first_use_;
- replace_to->last_use_ = last_use_;
- } else if (first_use_) {
- DCHECK_NOT_NULL(replace_to->first_use_);
- replace_to->last_use_->next = first_use_;
- first_use_->prev = replace_to->last_use_;
- replace_to->last_use_ = last_use_;
+ if (last_use) {
+    // Concatenate the use lists of {this} and {that}.
+ last_use->next = that->first_use_;
+ if (that->first_use_) that->first_use_->prev = last_use;
+ that->first_use_ = this->first_use_;
}
first_use_ = nullptr;
- last_use_ = nullptr;
}
@@ -174,8 +164,7 @@ Node::Node(NodeId id, const Operator* op, int input_count,
bit_field_(InputCountField::encode(input_count) |
ReservedInputCountField::encode(reserved_input_count) |
HasAppendableInputsField::encode(false)),
- first_use_(nullptr),
- last_use_(nullptr) {}
+ first_use_(nullptr) {}
void Node::EnsureAppendableInputs(Zone* zone) {
@@ -192,24 +181,21 @@ void Node::EnsureAppendableInputs(Zone* zone) {
void Node::AppendUse(Use* const use) {
- use->next = nullptr;
- use->prev = last_use_;
- if (last_use_) {
- last_use_->next = use;
- } else {
- first_use_ = use;
- }
- last_use_ = use;
+ DCHECK(first_use_ == nullptr || first_use_->prev == nullptr);
+ use->next = first_use_;
+ use->prev = nullptr;
+ if (first_use_) first_use_->prev = use;
+ first_use_ = use;
}
void Node::RemoveUse(Use* const use) {
- if (use == last_use_) {
- last_use_ = use->prev;
- }
+ DCHECK(first_use_ == nullptr || first_use_->prev == nullptr);
if (use->prev) {
+ DCHECK_NE(first_use_, use);
use->prev->next = use->next;
} else {
+ DCHECK_EQ(first_use_, use);
first_use_ = use->next;
}
if (use->next) {
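
With last_use_ gone, a node now anchors its use list only at the head. The sketch below is a standalone distillation (toy types, not V8's) of the prepend-and-splice scheme that AppendUse and ReplaceUses implement after this change:

    // Intrusive doubly-linked use list with a head pointer only.
    struct Use {
      Use* next = nullptr;
      Use* prev = nullptr;
    };

    struct Node {
      Use* first_use = nullptr;

      // O(1) prepend; no tail pointer needed.
      void AppendUse(Use* use) {
        use->next = first_use;
        use->prev = nullptr;
        if (first_use) first_use->prev = use;
        first_use = use;
      }

      // Splice this node's whole use list onto the head of {that}'s.
      void ReplaceUses(Node* that) {
        Use* last = nullptr;
        for (Use* u = first_use; u; u = u->next) last = u;  // find tail
        if (last) {
          last->next = that->first_use;
          if (that->first_use) that->first_use->prev = last;
          that->first_use = first_use;
        }
        first_use = nullptr;
      }
    };

The trade-off: AppendUse stays O(1) and every Node shrinks by one pointer, while ReplaceUses now pays a single walk over the use list to locate the tail before splicing.
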
diff --git a/deps/v8/src/compiler/node.h b/deps/v8/src/compiler/node.h
index 57a0ebb72e..e4bd0d0bdc 100644
--- a/deps/v8/src/compiler/node.h
+++ b/deps/v8/src/compiler/node.h
@@ -63,11 +63,10 @@ class Node FINAL {
void AppendInput(Zone* zone, Node* new_to);
void InsertInput(Zone* zone, int index, Node* new_to);
void RemoveInput(int index);
- void RemoveAllInputs();
+ void NullAllInputs();
void TrimInputCount(int new_input_count);
int UseCount() const;
- Node* UseAt(int index) const;
void ReplaceUses(Node* replace_to);
class InputEdges FINAL {
@@ -226,7 +225,6 @@ class Node FINAL {
NodeId const id_;
unsigned bit_field_;
Use* first_use_;
- Use* last_use_;
union {
// When a node is initially allocated, it uses a static buffer to hold its
// inputs under the assumption that the number of outputs will not increase.
@@ -249,6 +247,7 @@ std::ostream& operator<<(std::ostream& os, const Node& n);
// Typedefs to shorten commonly used Node containers.
typedef ZoneDeque<Node*> NodeDeque;
+typedef ZoneSet<Node*> NodeSet;
typedef ZoneVector<Node*> NodeVector;
typedef ZoneVector<NodeVector> NodeVectorVector;
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index a4f8d3ec16..73ce698720 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -10,12 +10,15 @@
V(Dead) \
V(Loop) \
V(Branch) \
+ V(Switch) \
V(IfTrue) \
V(IfFalse) \
- V(Switch) \
+ V(IfSuccess) \
+ V(IfException) \
V(IfValue) \
V(IfDefault) \
V(Merge) \
+ V(Deoptimize) \
V(Return) \
V(OsrNormalEntry) \
V(OsrLoopEntry) \
@@ -45,6 +48,7 @@
V(Finish) \
V(FrameState) \
V(StateValues) \
+ V(TypedStateValues) \
V(Call) \
V(Parameter) \
V(OsrValue) \
@@ -128,7 +132,7 @@
V(JSCallFunction) \
V(JSCallRuntime) \
V(JSYield) \
- V(JSDebugger)
+ V(JSStackCheck)
#define JS_OP_LIST(V) \
JS_SIMPLE_BINOP_LIST(V) \
@@ -138,113 +142,124 @@
JS_OTHER_OP_LIST(V)
// Opcodes for VirtualMachine-level operators.
-#define SIMPLIFIED_OP_LIST(V) \
- V(AnyToBoolean) \
- V(BooleanNot) \
- V(BooleanToNumber) \
- V(NumberEqual) \
- V(NumberLessThan) \
- V(NumberLessThanOrEqual) \
- V(NumberAdd) \
- V(NumberSubtract) \
- V(NumberMultiply) \
- V(NumberDivide) \
- V(NumberModulus) \
- V(NumberToInt32) \
- V(NumberToUint32) \
- V(PlainPrimitiveToNumber) \
- V(ReferenceEqual) \
- V(StringEqual) \
- V(StringLessThan) \
- V(StringLessThanOrEqual) \
- V(StringAdd) \
- V(ChangeTaggedToInt32) \
- V(ChangeTaggedToUint32) \
- V(ChangeTaggedToFloat64) \
- V(ChangeInt32ToTagged) \
- V(ChangeUint32ToTagged) \
- V(ChangeFloat64ToTagged) \
- V(ChangeBoolToBit) \
- V(ChangeBitToBool) \
- V(LoadField) \
- V(LoadBuffer) \
- V(LoadElement) \
- V(StoreField) \
- V(StoreBuffer) \
- V(StoreElement) \
- V(ObjectIsSmi) \
+#define SIMPLIFIED_COMPARE_BINOP_LIST(V) \
+ V(NumberEqual) \
+ V(NumberLessThan) \
+ V(NumberLessThanOrEqual) \
+ V(ReferenceEqual) \
+ V(StringEqual) \
+ V(StringLessThan) \
+ V(StringLessThanOrEqual)
+
+#define SIMPLIFIED_OP_LIST(V) \
+ SIMPLIFIED_COMPARE_BINOP_LIST(V) \
+ V(BooleanNot) \
+ V(BooleanToNumber) \
+ V(NumberAdd) \
+ V(NumberSubtract) \
+ V(NumberMultiply) \
+ V(NumberDivide) \
+ V(NumberModulus) \
+ V(NumberToInt32) \
+ V(NumberToUint32) \
+ V(PlainPrimitiveToNumber) \
+ V(StringAdd) \
+ V(ChangeTaggedToInt32) \
+ V(ChangeTaggedToUint32) \
+ V(ChangeTaggedToFloat64) \
+ V(ChangeInt32ToTagged) \
+ V(ChangeUint32ToTagged) \
+ V(ChangeFloat64ToTagged) \
+ V(ChangeBoolToBit) \
+ V(ChangeBitToBool) \
+ V(LoadField) \
+ V(LoadBuffer) \
+ V(LoadElement) \
+ V(StoreField) \
+ V(StoreBuffer) \
+ V(StoreElement) \
+ V(ObjectIsSmi) \
V(ObjectIsNonNegativeSmi)
// Opcodes for Machine-level operators.
-#define MACHINE_OP_LIST(V) \
- V(Load) \
- V(Store) \
- V(Word32And) \
- V(Word32Or) \
- V(Word32Xor) \
- V(Word32Shl) \
- V(Word32Shr) \
- V(Word32Sar) \
- V(Word32Ror) \
- V(Word32Equal) \
- V(Word64And) \
- V(Word64Or) \
- V(Word64Xor) \
- V(Word64Shl) \
- V(Word64Shr) \
- V(Word64Sar) \
- V(Word64Ror) \
- V(Word64Equal) \
- V(Int32Add) \
- V(Int32AddWithOverflow) \
- V(Int32Sub) \
- V(Int32SubWithOverflow) \
- V(Int32Mul) \
- V(Int32MulHigh) \
- V(Int32Div) \
- V(Int32Mod) \
- V(Int32LessThan) \
- V(Int32LessThanOrEqual) \
- V(Uint32Div) \
- V(Uint32LessThan) \
- V(Uint32LessThanOrEqual) \
- V(Uint32Mod) \
- V(Uint32MulHigh) \
- V(Int64Add) \
- V(Int64Sub) \
- V(Int64Mul) \
- V(Int64Div) \
- V(Int64Mod) \
- V(Int64LessThan) \
- V(Int64LessThanOrEqual) \
- V(Uint64Div) \
- V(Uint64LessThan) \
- V(Uint64Mod) \
- V(ChangeFloat32ToFloat64) \
- V(ChangeFloat64ToInt32) \
- V(ChangeFloat64ToUint32) \
- V(ChangeInt32ToFloat64) \
- V(ChangeInt32ToInt64) \
- V(ChangeUint32ToFloat64) \
- V(ChangeUint32ToUint64) \
- V(TruncateFloat64ToFloat32) \
- V(TruncateFloat64ToInt32) \
- V(TruncateInt64ToInt32) \
- V(Float64Add) \
- V(Float64Sub) \
- V(Float64Mul) \
- V(Float64Div) \
- V(Float64Mod) \
- V(Float64Sqrt) \
- V(Float64Equal) \
- V(Float64LessThan) \
- V(Float64LessThanOrEqual) \
- V(Float64Floor) \
- V(Float64Ceil) \
- V(Float64RoundTruncate) \
- V(Float64RoundTiesAway) \
- V(LoadStackPointer) \
- V(CheckedLoad) \
+#define MACHINE_COMPARE_BINOP_LIST(V) \
+ V(Word32Equal) \
+ V(Word64Equal) \
+ V(Int32LessThan) \
+ V(Int32LessThanOrEqual) \
+ V(Uint32LessThan) \
+ V(Uint32LessThanOrEqual) \
+ V(Int64LessThan) \
+ V(Int64LessThanOrEqual) \
+ V(Uint64LessThan) \
+ V(Float64Equal) \
+ V(Float64LessThan) \
+ V(Float64LessThanOrEqual)
+
+#define MACHINE_OP_LIST(V) \
+ MACHINE_COMPARE_BINOP_LIST(V) \
+ V(Load) \
+ V(Store) \
+ V(Word32And) \
+ V(Word32Or) \
+ V(Word32Xor) \
+ V(Word32Shl) \
+ V(Word32Shr) \
+ V(Word32Sar) \
+ V(Word32Ror) \
+ V(Word32Clz) \
+ V(Word64And) \
+ V(Word64Or) \
+ V(Word64Xor) \
+ V(Word64Shl) \
+ V(Word64Shr) \
+ V(Word64Sar) \
+ V(Word64Ror) \
+ V(Int32Add) \
+ V(Int32AddWithOverflow) \
+ V(Int32Sub) \
+ V(Int32SubWithOverflow) \
+ V(Int32Mul) \
+ V(Int32MulHigh) \
+ V(Int32Div) \
+ V(Int32Mod) \
+ V(Uint32Div) \
+ V(Uint32Mod) \
+ V(Uint32MulHigh) \
+ V(Int64Add) \
+ V(Int64Sub) \
+ V(Int64Mul) \
+ V(Int64Div) \
+ V(Int64Mod) \
+ V(Uint64Div) \
+ V(Uint64Mod) \
+ V(ChangeFloat32ToFloat64) \
+ V(ChangeFloat64ToInt32) \
+ V(ChangeFloat64ToUint32) \
+ V(ChangeInt32ToFloat64) \
+ V(ChangeInt32ToInt64) \
+ V(ChangeUint32ToFloat64) \
+ V(ChangeUint32ToUint64) \
+ V(TruncateFloat64ToFloat32) \
+ V(TruncateFloat64ToInt32) \
+ V(TruncateInt64ToInt32) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Mod) \
+ V(Float64Max) \
+ V(Float64Min) \
+ V(Float64Sqrt) \
+ V(Float64RoundDown) \
+ V(Float64RoundTruncate) \
+ V(Float64RoundTiesAway) \
+ V(Float64ExtractLowWord32) \
+ V(Float64ExtractHighWord32) \
+ V(Float64InsertLowWord32) \
+ V(Float64InsertHighWord32) \
+ V(LoadStackPointer) \
+ V(CheckedLoad) \
V(CheckedStore)
#define VALUE_OP_LIST(V) \
@@ -291,7 +306,7 @@ class IrOpcode {
// Returns true if opcode for JavaScript operator.
static bool IsJsOpcode(Value value) {
- return kJSEqual <= value && value <= kJSDebugger;
+ return kJSEqual <= value && value <= kJSStackCheck;
}
// Returns true if opcode for constant operator.
@@ -299,8 +314,23 @@ class IrOpcode {
return kInt32Constant <= value && value <= kHeapConstant;
}
- static bool IsPhiOpcode(Value val) {
- return val == kPhi || val == kEffectPhi;
+ static bool IsPhiOpcode(Value value) {
+ return value == kPhi || value == kEffectPhi;
+ }
+
+ static bool IsMergeOpcode(Value value) {
+ return value == kMerge || value == kLoop;
+ }
+
+ static bool IsIfProjectionOpcode(Value value) {
+ return kIfTrue <= value && value <= kIfDefault;
+ }
+
+ // Returns true if opcode for comparison operator.
+ static bool IsComparisonOpcode(Value value) {
+ return (kJSEqual <= value && value <= kJSGreaterThanOrEqual) ||
+ (kNumberEqual <= value && value <= kStringLessThanOrEqual) ||
+ (kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
}
};
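
The reshuffled lists above are classic X-macros: splitting out SIMPLIFIED_COMPARE_BINOP_LIST and MACHINE_COMPARE_BINOP_LIST and expanding them first gives each comparison group a contiguous enum range, which is exactly what the new IsComparisonOpcode range checks rely on. A self-contained miniature of the pattern, with hypothetical opcode names:

    #include <cstdio>

    // One V(...) list drives both the enum and the mnemonic table.
    #define COMPARE_OP_LIST(V) \
      V(WordEqual)             \
      V(WordLessThan)

    #define OP_LIST(V)   \
      COMPARE_OP_LIST(V) \
      V(WordAdd)         \
      V(WordSub)

    enum Opcode {
    #define DECLARE(Name) k##Name,
      OP_LIST(DECLARE)
    #undef DECLARE
    };

    const char* Mnemonic(Opcode op) {
      switch (op) {
    #define CASE(Name) case k##Name: return #Name;
        OP_LIST(CASE)
    #undef CASE
      }
      return "?";
    }

    // Valid only because the comparison sub-list expands first and
    // therefore occupies a contiguous enum range.
    bool IsComparisonOpcode(Opcode op) {
      return kWordEqual <= op && op <= kWordLessThan;
    }

    int main() {
      std::printf("%s %d\n", Mnemonic(kWordAdd),
                  IsComparisonOpcode(kWordEqual));
    }

Reordering an entry in such a list silently shifts the enum values, so the grouped sub-lists must stay contiguous for the range predicates to remain correct.
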
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 53bd16c0af..3a91fd6b58 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -20,13 +20,13 @@ bool OperatorProperties::HasContextInput(const Operator* op) {
// static
-bool OperatorProperties::HasFrameStateInput(const Operator* op) {
+int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
if (!FLAG_turbo_deoptimization) {
- return false;
+ return 0;
}
switch (op->opcode()) {
case IrOpcode::kFrameState:
- return true;
+ return 1;
case IrOpcode::kJSCallRuntime: {
const CallRuntimeParameters& p = CallRuntimeParametersOf(op);
return Linkage::NeedsFrameState(p.id());
@@ -35,7 +35,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
// Strict equality cannot lazily deoptimize.
case IrOpcode::kJSStrictEqual:
case IrOpcode::kJSStrictNotEqual:
- return false;
+ return 0;
// Calls
case IrOpcode::kJSCallFunction:
@@ -51,19 +51,6 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSLessThanOrEqual:
case IrOpcode::kJSNotEqual:
- // Binary operations
- case IrOpcode::kJSAdd:
- case IrOpcode::kJSBitwiseAnd:
- case IrOpcode::kJSBitwiseOr:
- case IrOpcode::kJSBitwiseXor:
- case IrOpcode::kJSDivide:
- case IrOpcode::kJSModulus:
- case IrOpcode::kJSMultiply:
- case IrOpcode::kJSShiftLeft:
- case IrOpcode::kJSShiftRight:
- case IrOpcode::kJSShiftRightLogical:
- case IrOpcode::kJSSubtract:
-
// Context operations
case IrOpcode::kJSCreateWithContext:
@@ -72,16 +59,40 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToNumber:
case IrOpcode::kJSToName:
+ // Misc operations
+ case IrOpcode::kJSStackCheck:
+
// Properties
case IrOpcode::kJSLoadNamed:
- case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSStoreNamed:
- case IrOpcode::kJSStoreProperty:
+ case IrOpcode::kJSLoadProperty:
case IrOpcode::kJSDeleteProperty:
- return true;
+ return 1;
+
+ // StoreProperty provides a second frame state just before
+ // the operation. This is used to lazy-deoptimize a to-number
+ // conversion for typed arrays.
+ case IrOpcode::kJSStoreProperty:
+ return 2;
+
+    // Binary operators that can deopt in the middle of the operation (e.g.,
+ // as a result of lazy deopt in ToNumber conversion) need a second frame
+ // state so that we can resume before the operation.
+ case IrOpcode::kJSMultiply:
+ case IrOpcode::kJSAdd:
+ case IrOpcode::kJSBitwiseAnd:
+ case IrOpcode::kJSBitwiseOr:
+ case IrOpcode::kJSBitwiseXor:
+ case IrOpcode::kJSDivide:
+ case IrOpcode::kJSModulus:
+ case IrOpcode::kJSShiftLeft:
+ case IrOpcode::kJSShiftRight:
+ case IrOpcode::kJSShiftRightLogical:
+ case IrOpcode::kJSSubtract:
+ return 2;
default:
- return false;
+ return 0;
}
}
@@ -100,7 +111,8 @@ bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
- opcode == IrOpcode::kIfFalse || opcode == IrOpcode::kIfValue ||
+ opcode == IrOpcode::kIfFalse || opcode == IrOpcode::kIfSuccess ||
+ opcode == IrOpcode::kIfException || opcode == IrOpcode::kIfValue ||
opcode == IrOpcode::kIfDefault;
}
diff --git a/deps/v8/src/compiler/operator-properties.h b/deps/v8/src/compiler/operator-properties.h
index 37c9755ed7..15ce2e105f 100644
--- a/deps/v8/src/compiler/operator-properties.h
+++ b/deps/v8/src/compiler/operator-properties.h
@@ -18,14 +18,11 @@ class Operator;
class OperatorProperties FINAL {
public:
static bool HasContextInput(const Operator* op);
- static bool HasFrameStateInput(const Operator* op);
-
static int GetContextInputCount(const Operator* op) {
return HasContextInput(op) ? 1 : 0;
}
- static int GetFrameStateInputCount(const Operator* op) {
- return HasFrameStateInput(op) ? 1 : 0;
- }
+ static int GetFrameStateInputCount(const Operator* op);
+
static int GetTotalInputCount(const Operator* op);
static bool IsBasicBlockBegin(const Operator* op);
diff --git a/deps/v8/src/compiler/operator.h b/deps/v8/src/compiler/operator.h
index 6407499da5..ec365fab6f 100644
--- a/deps/v8/src/compiler/operator.h
+++ b/deps/v8/src/compiler/operator.h
@@ -97,6 +97,14 @@ class Operator : public ZoneObject {
int EffectOutputCount() const { return effect_out_; }
int ControlOutputCount() const { return control_out_; }
+ static size_t ZeroIfEliminatable(Properties properties) {
+ return (properties & kEliminatable) == kEliminatable ? 0 : 1;
+ }
+
+ static size_t ZeroIfNoThrow(Properties properties) {
+ return (properties & kNoThrow) == kNoThrow ? 0 : 2;
+ }
+
static size_t ZeroIfPure(Properties properties) {
return (properties & kPure) == kPure ? 0 : 1;
}
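
The ZeroIf* helpers above fold a property test into an input/output count: a kNoThrow operator contributes no control projections, while a throwing call contributes two (the IfSuccess and IfException introduced elsewhere in this patch). A standalone sketch with assumed flag values — V8's actual Properties enum uses different bits, and kEliminatable is a different composite:

    #include <cstddef>

    // Assumed flag layout, for illustration only.
    using Properties = unsigned;
    constexpr Properties kNoWrite = 1 << 0;
    constexpr Properties kNoThrow = 1 << 1;
    constexpr Properties kEliminatable = kNoWrite | kNoThrow;
    constexpr Properties kPure = kEliminatable | (1 << 2);

    // No-throw operators need no IfSuccess/IfException projections;
    // everything else gets two control outputs.
    constexpr size_t ZeroIfNoThrow(Properties p) {
      return (p & kNoThrow) == kNoThrow ? 0 : 2;
    }

    static_assert(ZeroIfNoThrow(kPure) == 0, "pure implies no-throw");
    static_assert(ZeroIfNoThrow(0) == 2, "a throwing call projects two edges");
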
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index b7cd7ec93f..2ab5d73984 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -26,6 +26,18 @@ OsrHelper::OsrHelper(CompilationInfo* info)
info->osr_expr_stack_height()) {}
+#ifdef DEBUG
+#define TRACE_COND (FLAG_trace_turbo_graph && FLAG_trace_osr)
+#else
+#define TRACE_COND false
+#endif
+
+#define TRACE(...) \
+ do { \
+ if (TRACE_COND) PrintF(__VA_ARGS__); \
+ } while (false)
+
+
// Peel outer loops and rewire the graph so that control reduction can
// produce a properly formed graph.
static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
@@ -44,6 +56,9 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
NodeVector* mapping =
new (stuff) NodeVector(original_count, sentinel, tmp_zone);
copies.push_back(mapping);
+ TRACE("OsrDuplication #%zu, depth %zu, header #%d:%s\n", copies.size(),
+ loop->depth(), loop_tree->HeaderNode(loop)->id(),
+ loop_tree->HeaderNode(loop)->op()->mnemonic());
// Prepare the mapping for OSR values and the OSR loop entry.
mapping->at(osr_normal_entry->id()) = dead;
@@ -54,6 +69,8 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
outer = outer->parent()) {
for (Node* node : loop_tree->HeaderNodes(outer)) {
mapping->at(node->id()) = dead;
+ TRACE(" ---- #%d:%s -> dead (header)\n", node->id(),
+ node->op()->mnemonic());
}
}
@@ -82,71 +99,132 @@ static void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
NodeProperties::SetBounds(copy, NodeProperties::GetBounds(orig));
}
mapping->at(orig->id()) = copy;
+ TRACE(" copy #%d:%s -> #%d\n", orig->id(), orig->op()->mnemonic(),
+ copy->id());
}
// Fix missing inputs.
- for (size_t i = 0; i < all.live.size(); i++) {
- Node* orig = all.live[i];
+ for (Node* orig : all.live) {
Node* copy = mapping->at(orig->id());
for (int j = 0; j < copy->InputCount(); j++) {
- Node* input = copy->InputAt(j);
- if (input == sentinel)
+ if (copy->InputAt(j) == sentinel) {
copy->ReplaceInput(j, mapping->at(orig->InputAt(j)->id()));
+ }
}
}
- // Construct the transfer from the previous graph copies to the new copy.
+ // Construct the entry into this loop from previous copies.
+
+ // Gather the live loop header nodes, {loop_header} first.
Node* loop_header = loop_tree->HeaderNode(loop);
- NodeVector* previous =
- copies.size() > 1 ? copies[copies.size() - 2] : nullptr;
- const int backedges = loop_header->op()->ControlInputCount() - 1;
- if (backedges == 1) {
- // Simple case. Map the incoming edges to the loop to the previous copy.
- for (Node* node : loop_tree->HeaderNodes(loop)) {
- if (!all.IsLive(node)) continue; // dead phi hanging off loop.
+ NodeVector header_nodes(tmp_zone);
+ header_nodes.reserve(loop->HeaderSize());
+ header_nodes.push_back(loop_header); // put the loop header first.
+ for (Node* node : loop_tree->HeaderNodes(loop)) {
+ if (node != loop_header && all.IsLive(node)) {
+ header_nodes.push_back(node);
+ }
+ }
+
+ // Gather backedges from the previous copies of the inner loops of {loop}.
+ NodeVectorVector backedges(tmp_zone);
+ TRACE("Gathering backedges...\n");
+ for (int i = 1; i < loop_header->InputCount(); i++) {
+ if (TRACE_COND) {
+ Node* control = loop_header->InputAt(i);
+ size_t incoming_depth = 0;
+ for (int j = 0; j < control->op()->ControlInputCount(); j++) {
+ Node* k = NodeProperties::GetControlInput(control, j);
+ incoming_depth =
+ std::max(incoming_depth, loop_tree->ContainingLoop(k)->depth());
+ }
+
+ TRACE(" edge @%d #%d:%s, incoming depth %zu\n", i, control->id(),
+ control->op()->mnemonic(), incoming_depth);
+ }
+
+ for (int pos = static_cast<int>(copies.size()) - 1; pos >= 0; pos--) {
+ backedges.push_back(NodeVector(tmp_zone));
+ backedges.back().reserve(header_nodes.size());
+
+ NodeVector* previous_map = pos > 0 ? copies[pos - 1] : nullptr;
+
+ for (Node* node : header_nodes) {
+ Node* input = node->InputAt(i);
+ if (previous_map) input = previous_map->at(input->id());
+ backedges.back().push_back(input);
+ TRACE(" node #%d:%s(@%d) = #%d:%s\n", node->id(),
+ node->op()->mnemonic(), i, input->id(),
+ input->op()->mnemonic());
+ }
+ }
+ }
+
+ int backedge_count = static_cast<int>(backedges.size());
+ if (backedge_count == 1) {
+ // Simple case of single backedge, therefore a single entry.
+ int index = 0;
+ for (Node* node : header_nodes) {
Node* copy = mapping->at(node->id());
- Node* backedge = node->InputAt(1);
- if (previous) backedge = previous->at(backedge->id());
- copy->ReplaceInput(0, backedge);
+ Node* input = backedges[0][index];
+ copy->ReplaceInput(0, input);
+ TRACE(" header #%d:%s(0) => #%d:%s\n", copy->id(),
+ copy->op()->mnemonic(), input->id(), input->op()->mnemonic());
+ index++;
}
} else {
- // Complex case. Multiple backedges. Introduce a merge for incoming edges.
- tmp_inputs.clear();
- for (int i = 0; i < backedges; i++) {
- Node* backedge = loop_header->InputAt(i + 1);
- if (previous) backedge = previous->at(backedge->id());
- tmp_inputs.push_back(backedge);
- }
- Node* merge =
- graph->NewNode(common->Merge(backedges), backedges, &tmp_inputs[0]);
- for (Node* node : loop_tree->HeaderNodes(loop)) {
- if (!all.IsLive(node)) continue; // dead phi hanging off loop.
+ // Complex case of multiple backedges from previous copies requires
+ // merging the backedges to create the entry into the loop header.
+ Node* merge = nullptr;
+ int index = 0;
+ for (Node* node : header_nodes) {
+ // Gather edge inputs into {tmp_inputs}.
+ tmp_inputs.clear();
+ for (int edge = 0; edge < backedge_count; edge++) {
+ tmp_inputs.push_back(backedges[edge][index]);
+ }
Node* copy = mapping->at(node->id());
+ Node* input;
if (node == loop_header) {
- // The entry to the loop is the merge.
+ // Create the merge for the entry into the loop header.
+ input = merge = graph->NewNode(common->Merge(backedge_count),
+ backedge_count, &tmp_inputs[0]);
copy->ReplaceInput(0, merge);
} else {
- // Merge inputs to the phi at the loop entry.
- tmp_inputs.clear();
- for (int i = 0; i < backedges; i++) {
- Node* backedge = node->InputAt(i + 1);
- if (previous) backedge = previous->at(backedge->id());
- tmp_inputs.push_back(backedge);
- }
+ // Create a phi that merges values at entry into the loop header.
+ DCHECK_NOT_NULL(merge);
+ DCHECK(IrOpcode::IsPhiOpcode(node->opcode()));
tmp_inputs.push_back(merge);
- Node* phi =
- graph->NewNode(common->ResizeMergeOrPhi(node->op(), backedges),
- backedges + 1, &tmp_inputs[0]);
+ Node* phi = input = graph->NewNode(
+ common->ResizeMergeOrPhi(node->op(), backedge_count),
+ backedge_count + 1, &tmp_inputs[0]);
copy->ReplaceInput(0, phi);
}
+
+ // Print the merge.
+ if (TRACE_COND) {
+ TRACE(" header #%d:%s(0) => #%d:%s(", copy->id(),
+ copy->op()->mnemonic(), input->id(), input->op()->mnemonic());
+ for (size_t i = 0; i < tmp_inputs.size(); i++) {
+ if (i > 0) TRACE(", ");
+ Node* input = tmp_inputs[i];
+ TRACE("#%d:%s", input->id(), input->op()->mnemonic());
+ }
+ TRACE(")\n");
+ }
+
+ index++;
}
}
}
// Kill the outer loops in the original graph.
+ TRACE("Killing outer loop headers...\n");
for (LoopTree::Loop* outer = osr_loop->parent(); outer;
outer = outer->parent()) {
- loop_tree->HeaderNode(outer)->ReplaceUses(dead);
+ Node* loop_header = loop_tree->HeaderNode(outer);
+ loop_header->ReplaceUses(dead);
+ TRACE(" ---- #%d:%s\n", loop_header->id(), loop_header->op()->mnemonic());
}
// Merge the ends of the graph copies.
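
The TRACE macro introduced at the top of this file follows the usual do { ... } while (false) idiom, so the expansion behaves as a single statement even inside an unbraced if/else, and the constant-false TRACE_COND in release builds lets the compiler drop the PrintF calls entirely. A standalone sketch with a stand-in flag:

    #include <cstdio>

    #ifdef DEBUG
    // Stand-in for FLAG_trace_turbo_graph && FLAG_trace_osr.
    static bool g_trace_osr = true;
    #define TRACE_COND g_trace_osr
    #else
    #define TRACE_COND false
    #endif

    #define TRACE(...)                              \
      do {                                          \
        if (TRACE_COND) std::printf(__VA_ARGS__);   \
      } while (false)

    int main() {
      // Safe in unbraced control flow precisely because of do/while(false).
      if (sizeof(void*) == 8) TRACE("copy #%d:%s -> #%d\n", 7, "Phi", 42);
      else TRACE("32-bit build\n");
    }
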
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 5ec5d085f7..b1d7fda9a3 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -8,7 +8,6 @@
#include <sstream>
#include "src/base/platform/elapsed-timer.h"
-#include "src/bootstrapper.h" // TODO(mstarzinger): Only temporary.
#include "src/compiler/ast-graph-builder.h"
#include "src/compiler/ast-loop-assignment-analyzer.h"
#include "src/compiler/basic-block-instrumentor.h"
@@ -26,6 +25,7 @@
#include "src/compiler/js-generic-lowering.h"
#include "src/compiler/js-inlining.h"
#include "src/compiler/js-intrinsic-lowering.h"
+#include "src/compiler/js-type-feedback.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/jump-threading.h"
#include "src/compiler/load-elimination.h"
@@ -47,6 +47,7 @@
#include "src/compiler/verifier.h"
#include "src/compiler/zone-pool.h"
#include "src/ostreams.h"
+#include "src/type-info.h"
#include "src/utils.h"
namespace v8 {
@@ -73,6 +74,7 @@ class PipelineData {
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
+ js_type_feedback_(nullptr),
typer_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
@@ -112,6 +114,7 @@ class PipelineData {
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
+ js_type_feedback_(nullptr),
typer_(nullptr),
schedule_(schedule),
instruction_zone_scope_(zone_pool_),
@@ -138,6 +141,7 @@ class PipelineData {
common_(nullptr),
javascript_(nullptr),
jsgraph_(nullptr),
+ js_type_feedback_(nullptr),
typer_(nullptr),
schedule_(nullptr),
instruction_zone_scope_(zone_pool_),
@@ -175,6 +179,10 @@ class PipelineData {
CommonOperatorBuilder* common() const { return common_; }
JSOperatorBuilder* javascript() const { return javascript_; }
JSGraph* jsgraph() const { return jsgraph_; }
+ JSTypeFeedbackTable* js_type_feedback() { return js_type_feedback_; }
+ void set_js_type_feedback(JSTypeFeedbackTable* js_type_feedback) {
+ js_type_feedback_ = js_type_feedback;
+ }
Typer* typer() const { return typer_.get(); }
LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
@@ -208,6 +216,7 @@ class PipelineData {
common_ = nullptr;
javascript_ = nullptr;
jsgraph_ = nullptr;
+ js_type_feedback_ = nullptr;
schedule_ = nullptr;
}
@@ -260,6 +269,7 @@ class PipelineData {
CommonOperatorBuilder* common_;
JSOperatorBuilder* javascript_;
JSGraph* jsgraph_;
+ JSTypeFeedbackTable* js_type_feedback_;
// TODO(dcarney): make this into a ZoneObject.
SmartPointer<Typer> typer_;
Schedule* schedule_;
@@ -292,20 +302,17 @@ static void TraceSchedule(Schedule* schedule) {
static SmartArrayPointer<char> GetDebugName(CompilationInfo* info) {
- SmartArrayPointer<char> name;
- if (info->IsStub()) {
- if (info->code_stub() != NULL) {
- CodeStub::Major major_key = info->code_stub()->MajorKey();
- const char* major_name = CodeStub::MajorName(major_key, false);
- size_t len = strlen(major_name);
- name.Reset(new char[len]);
- memcpy(name.get(), major_name, len);
- }
+ if (info->code_stub() != NULL) {
+ CodeStub::Major major_key = info->code_stub()->MajorKey();
+ const char* major_name = CodeStub::MajorName(major_key, false);
+ size_t len = strlen(major_name) + 1;
+ SmartArrayPointer<char> name(new char[len]);
+ memcpy(name.get(), major_name, len);
+ return name;
} else {
AllowHandleDereference allow_deref;
- name = info->function()->debug_name()->ToCString();
+ return info->function()->debug_name()->ToCString();
}
- return name;
}
@@ -314,14 +321,16 @@ class AstGraphBuilderWithPositions : public AstGraphBuilder {
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
JSGraph* jsgraph,
LoopAssignmentAnalysis* loop_assignment,
+ JSTypeFeedbackTable* js_type_feedback,
SourcePositionTable* source_positions)
- : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment),
+ : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment,
+ js_type_feedback),
source_positions_(source_positions),
start_position_(info->shared_info()->start_position()) {}
- bool CreateGraph(bool constant_context) {
+ bool CreateGraph(bool constant_context, bool stack_check) {
SourcePositionTable::Scope pos_scope(source_positions_, start_position_);
- return AstGraphBuilder::CreateGraph(constant_context);
+ return AstGraphBuilder::CreateGraph(constant_context, stack_check);
}
#define DEF_VISIT(type) \
@@ -423,8 +432,9 @@ struct GraphBuilderPhase {
void Run(PipelineData* data, Zone* temp_zone, bool constant_context) {
AstGraphBuilderWithPositions graph_builder(
temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
- data->source_positions());
- if (!graph_builder.CreateGraph(constant_context)) {
+ data->js_type_feedback(), data->source_positions());
+ bool stack_check = !data->info()->IsStub();
+ if (!graph_builder.CreateGraph(constant_context, stack_check)) {
data->set_compilation_failed();
}
}
@@ -451,7 +461,10 @@ struct InliningPhase {
void Run(PipelineData* data, Zone* temp_zone) {
SourcePositionTable::Scope pos(data->source_positions(),
SourcePosition::Unknown());
- JSInliner inliner(temp_zone, data->info(), data->jsgraph());
+ JSInliner inliner(data->info()->is_inlining_enabled()
+ ? JSInliner::kGeneralInlining
+ : JSInliner::kBuiltinsInlining,
+ temp_zone, data->info(), data->jsgraph());
GraphReducer graph_reducer(data->graph(), temp_zone);
AddReducer(data, &graph_reducer, &inliner);
graph_reducer.ReduceGraph();
@@ -480,21 +493,38 @@ struct OsrDeconstructionPhase {
};
+struct JSTypeFeedbackPhase {
+ static const char* phase_name() { return "type feedback specializing"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ SourcePositionTable::Scope pos(data->source_positions(),
+ SourcePosition::Unknown());
+ Handle<Context> native_context(data->info()->context()->native_context());
+ TypeFeedbackOracle oracle(data->isolate(), temp_zone,
+ data->info()->unoptimized_code(),
+ data->info()->feedback_vector(), native_context);
+ GraphReducer graph_reducer(data->graph(), temp_zone);
+ JSTypeFeedbackSpecializer specializer(data->jsgraph(),
+ data->js_type_feedback(), &oracle);
+ AddReducer(data, &graph_reducer, &specializer);
+ graph_reducer.ReduceGraph();
+ }
+};
+
+
struct TypedLoweringPhase {
static const char* phase_name() { return "typed lowering"; }
void Run(PipelineData* data, Zone* temp_zone) {
SourcePositionTable::Scope pos(data->source_positions(),
SourcePosition::Unknown());
- ValueNumberingReducer vn_reducer(temp_zone);
LoadElimination load_elimination;
JSBuiltinReducer builtin_reducer(data->jsgraph());
JSTypedLowering typed_lowering(data->jsgraph(), temp_zone);
JSIntrinsicLowering intrinsic_lowering(data->jsgraph());
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
+ CommonOperatorReducer common_reducer(data->jsgraph());
GraphReducer graph_reducer(data->graph(), temp_zone);
- AddReducer(data, &graph_reducer, &vn_reducer);
AddReducer(data, &graph_reducer, &builtin_reducer);
AddReducer(data, &graph_reducer, &typed_lowering);
AddReducer(data, &graph_reducer, &intrinsic_lowering);
@@ -518,7 +548,7 @@ struct SimplifiedLoweringPhase {
ValueNumberingReducer vn_reducer(temp_zone);
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
+ CommonOperatorReducer common_reducer(data->jsgraph());
GraphReducer graph_reducer(data->graph(), temp_zone);
AddReducer(data, &graph_reducer, &vn_reducer);
AddReducer(data, &graph_reducer, &simple_reducer);
@@ -533,6 +563,8 @@ struct ControlFlowOptimizationPhase {
static const char* phase_name() { return "control flow optimization"; }
void Run(PipelineData* data, Zone* temp_zone) {
+ SourcePositionTable::Scope pos(data->source_positions(),
+ SourcePosition::Unknown());
ControlFlowOptimizer optimizer(data->jsgraph(), temp_zone);
optimizer.Optimize();
}
@@ -549,7 +581,7 @@ struct ChangeLoweringPhase {
SimplifiedOperatorReducer simple_reducer(data->jsgraph());
ChangeLowering lowering(data->jsgraph());
MachineOperatorReducer machine_reducer(data->jsgraph());
- CommonOperatorReducer common_reducer;
+ CommonOperatorReducer common_reducer(data->jsgraph());
GraphReducer graph_reducer(data->graph(), temp_zone);
AddReducer(data, &graph_reducer, &vn_reducer);
AddReducer(data, &graph_reducer, &simple_reducer);
@@ -744,7 +776,7 @@ struct JumpThreadingPhase {
static const char* phase_name() { return "jump threading"; }
void Run(PipelineData* data, Zone* temp_zone) {
- ZoneVector<BasicBlock::RpoNumber> result(temp_zone);
+ ZoneVector<RpoNumber> result(temp_zone);
if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence())) {
JumpThreading::ApplyForwarding(result, data->sequence());
}
@@ -835,11 +867,10 @@ Handle<Code> Pipeline::GenerateCode() {
// the correct solution is to restore the context register after invoking
// builtins from full-codegen.
Handle<SharedFunctionInfo> shared = info()->shared_info();
- if (isolate()->bootstrapper()->IsActive() ||
- shared->disable_optimization_reason() ==
- kBuiltinFunctionCannotBeOptimized) {
- shared->DisableOptimization(kBuiltinFunctionCannotBeOptimized);
- return Handle<Code>::null();
+ for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) {
+ Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i);
+ Object* builtin = isolate()->js_builtins_object()->javascript_builtin(id);
+ if (*info()->closure() == builtin) return Handle<Code>::null();
}
// TODO(dslomov): support turbo optimization of subclass constructors.
@@ -885,6 +916,11 @@ Handle<Code> Pipeline::GenerateCode() {
PipelineData data(&zone_pool, info(), pipeline_statistics.get());
this->data_ = &data;
+ if (info()->is_type_feedback_enabled()) {
+ data.set_js_type_feedback(new (data.graph_zone())
+ JSTypeFeedbackTable(data.graph_zone()));
+ }
+
BeginPhaseKind("graph creation");
if (FLAG_trace_turbo) {
@@ -915,7 +951,7 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("Context specialized", true);
}
- if (info()->is_inlining_enabled()) {
+ if (info()->is_builtin_inlining_enabled() || info()->is_inlining_enabled()) {
Run<InliningPhase>();
RunPrintAndVerify("Inlined", true);
}
@@ -952,19 +988,24 @@ Handle<Code> Pipeline::GenerateCode() {
RunPrintAndVerify("OSR deconstruction");
}
+ if (info()->is_type_feedback_enabled()) {
+ Run<JSTypeFeedbackPhase>();
+ RunPrintAndVerify("JSType feedback");
+ }
+
// Lower simplified operators and insert changes.
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Lowered simplified");
// Optimize control flow.
- if (FLAG_turbo_switch) {
+ if (FLAG_turbo_cf_optimization) {
Run<ControlFlowOptimizationPhase>();
RunPrintAndVerify("Control flow optimized");
}
// Lower changes that have been inserted before.
Run<ChangeLoweringPhase>();
- // // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+ // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Lowered changes", true);
Run<LateControlReductionPhase>();
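
Each new phase here (JSTypeFeedbackPhase and friends) follows the same shape: a plain struct with a static phase_name() and a Run(PipelineData*, Zone*) body, invoked generically through Run<Phase>(). A toy reconstruction of that dispatch, with simplified stand-in types:

    #include <cstdio>

    struct PipelineData { int node_count = 100; };  // stand-in, not V8's

    struct TypedLoweringPhase {
      static const char* phase_name() { return "typed lowering"; }
      void Run(PipelineData* data) { data->node_count -= 10; }
    };

    struct ChangeLoweringPhase {
      static const char* phase_name() { return "change lowering"; }
      void Run(PipelineData* data) { data->node_count += 5; }
    };

    // Generic driver: tracing, statistics, and zone setup can hang off
    // this one template instead of being repeated in every phase.
    template <typename Phase>
    void Run(PipelineData* data) {
      std::printf("phase: %s\n", Phase::phase_name());
      Phase phase;
      phase.Run(data);
    }

    int main() {
      PipelineData data;
      Run<TypedLoweringPhase>(&data);
      Run<ChangeLoweringPhase>(&data);
      std::printf("nodes: %d\n", data.node_count);
    }
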
diff --git a/deps/v8/src/compiler/ppc/OWNERS b/deps/v8/src/compiler/ppc/OWNERS
new file mode 100644
index 0000000000..beecb3d0b1
--- /dev/null
+++ b/deps/v8/src/compiler/ppc/OWNERS
@@ -0,0 +1,3 @@
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 467d035cc4..c61983395c 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -52,7 +52,7 @@ class PPCOperandConverter FINAL : public InstructionOperandConverter {
return false;
}
- Operand InputImmediate(int index) {
+ Operand InputImmediate(size_t index) {
Constant constant = ToConstant(instr_->InputAt(index));
switch (constant.type()) {
case Constant::kInt32:
@@ -76,8 +76,8 @@ class PPCOperandConverter FINAL : public InstructionOperandConverter {
return Operand::Zero();
}
- MemOperand MemoryOperand(AddressingMode* mode, int* first_index) {
- const int index = *first_index;
+ MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
+ const size_t index = *first_index;
*mode = AddressingModeField::decode(instr_->opcode());
switch (*mode) {
case kMode_None:
@@ -93,7 +93,7 @@ class PPCOperandConverter FINAL : public InstructionOperandConverter {
return MemOperand(r0);
}
- MemOperand MemoryOperand(AddressingMode* mode, int first_index = 0) {
+ MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
return MemoryOperand(mode, &first_index);
}
@@ -109,7 +109,7 @@ class PPCOperandConverter FINAL : public InstructionOperandConverter {
};
-static inline bool HasRegisterInput(Instruction* instr, int index) {
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsRegister();
}
@@ -339,6 +339,22 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
} while (0)
+#define ASSEMBLE_FLOAT_MAX(scratch_reg) \
+ do { \
+ __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(0), \
+ i.InputDoubleRegister(1)); \
+ } while (0)
+
+
+#define ASSEMBLE_FLOAT_MIN(scratch_reg) \
+ do { \
+ __ fsub(scratch_reg, i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+ __ fsel(i.OutputDoubleRegister(), scratch_reg, i.InputDoubleRegister(1), \
+ i.InputDoubleRegister(0)); \
+ } while (0)
+
+
#define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
@@ -369,7 +385,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx) \
do { \
- int index = 0; \
+ size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
@@ -384,7 +400,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx) \
do { \
- int index = 0; \
+ size_t index = 0; \
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
Register value = i.InputRegister(index); \
@@ -401,8 +417,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
do { \
DoubleRegister result = i.OutputDoubleRegister(); \
+ size_t index = 0; \
AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, 0); \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ extsw(offset, offset); \
@@ -427,8 +444,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
do { \
Register result = i.OutputRegister(); \
+ size_t index = 0; \
AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, 0); \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ extsw(offset, offset); \
@@ -453,8 +471,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx) \
do { \
Label done; \
+ size_t index = 0; \
AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, 0); \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ extsw(offset, offset); \
@@ -479,8 +498,9 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
do { \
Label done; \
+ size_t index = 0; \
AddressingMode mode = kMode_None; \
- MemOperand operand = i.MemoryOperand(&mode, 0); \
+ MemOperand operand = i.MemoryOperand(&mode, index); \
DCHECK_EQ(kMode_MRR, mode); \
Register offset = operand.rb(); \
__ extsw(offset, offset); \
@@ -532,7 +552,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
@@ -548,7 +568,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(ip);
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
@@ -556,10 +576,24 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
AssembleArchJump(i.InputRpo(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchLookupSwitch:
+ AssembleArchLookupSwitch(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
case kArchNop:
// don't emit code for nops.
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ break;
+ }
case kArchRet:
AssembleReturn();
DCHECK_EQ(LeaveRC, i.OutputRCBit());
@@ -781,6 +815,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kPPC_Neg64:
__ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
break;
+ case kPPC_MaxFloat64:
+ ASSEMBLE_FLOAT_MAX(kScratchDoubleReg);
+ break;
+ case kPPC_MinFloat64:
+ ASSEMBLE_FLOAT_MIN(kScratchDoubleReg);
+ break;
case kPPC_SqrtFloat64:
ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
break;
@@ -799,6 +839,10 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kPPC_NegFloat64:
ASSEMBLE_FLOAT_UNOP_RC(fneg);
break;
+ case kPPC_Cntlz32:
+ __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
case kPPC_Cmp32:
ASSEMBLE_COMPARE(cmpw, cmplw);
break;
@@ -885,6 +929,32 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
+ case kPPC_Float64ExtractLowWord32:
+ __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Float64ExtractHighWord32:
+ __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Float64InsertLowWord32:
+ __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Float64InsertHighWord32:
+ __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
+ case kPPC_Float64Construct:
+#if V8_TARGET_ARCH_PPC64
+ __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
+ i.InputRegister(0), i.InputRegister(1), r0);
+#else
+ __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
+ i.InputRegister(1));
+#endif
+ DCHECK_EQ(LeaveRC, i.OutputRCBit());
+ break;
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
break;
@@ -1008,7 +1078,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}
@@ -1075,32 +1145,53 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ __ Cmpi(input, Operand(i.InputInt32(index + 0)), r0);
+ __ beq(GetLabel(i.InputRpo(index + 1)));
+ }
+ AssembleArchJump(i.InputRpo(1));
+}
+
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ PPCOperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
+ Label** cases = zone()->NewArray<Label*>(case_count);
+ for (int32_t index = 0; index < case_count; ++index) {
+ cases[index] = GetLabel(i.InputRpo(index + 2));
+ }
+ Label* const table = AddJumpTable(cases, case_count);
+ __ Cmpli(input, Operand(case_count), r0);
+ __ bge(GetLabel(i.InputRpo(1)));
+ __ mov_label_addr(kScratchReg, table);
+ __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
+ __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
+ __ Jump(kScratchReg);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
void CodeGenerator::AssemblePrologue() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
-#if ABI_USES_FUNCTION_DESCRIPTORS
__ function_descriptor();
-#endif
int register_save_area_size = 0;
RegList frame_saves = fp.bit();
__ mflr(r0);
-#if V8_OOL_CONSTANT_POOL
- __ Push(r0, fp, kConstantPoolRegister);
- // Adjust FP to point to saved FP.
- __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
- register_save_area_size += kPointerSize;
- frame_saves |= kConstantPoolRegister.bit();
-#else
__ Push(r0, fp);
__ mr(fp, sp);
-#endif
// Save callee-saved registers.
const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
@@ -1114,12 +1205,11 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
- } else {
+ } else if (stack_slots > 0) {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
}
- int stack_slots = frame()->GetSpillSlotCount();
if (info()->is_osr()) {
// TurboFan OSR-compiled functions cannot be entered directly.
@@ -1131,6 +1221,8 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
stack_slots -= frame()->GetOsrStackSlotCount();
}
@@ -1143,18 +1235,15 @@ void CodeGenerator::AssemblePrologue() {
void CodeGenerator::AssembleReturn() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+ int stack_slots = frame()->GetSpillSlotCount();
if (descriptor->kind() == CallDescriptor::kCallAddress) {
if (frame()->GetRegisterSaveAreaSize() > 0) {
// Remove this frame's spill slots first.
- int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
__ Add(sp, sp, stack_slots * kPointerSize, r0);
}
// Restore registers.
RegList frame_saves = fp.bit();
-#if V8_OOL_CONSTANT_POOL
- frame_saves |= kConstantPoolRegister.bit();
-#endif
const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
if (saves != 0) {
__ MultiPop(saves);
@@ -1162,12 +1251,14 @@ void CodeGenerator::AssembleReturn() {
}
__ LeaveFrame(StackFrame::MANUAL);
__ Ret();
- } else {
+ } else if (descriptor->IsJSFunctionCall() || stack_slots > 0) {
int pop_count = descriptor->IsJSFunctionCall()
? static_cast<int>(descriptor->JSParameterCount())
: 0;
__ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
__ Ret();
+ } else {
+ __ Ret();
}
}
@@ -1289,8 +1380,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
} else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
#else
} else if (source->IsStackSlot()) {
-#endif
DCHECK(destination->IsStackSlot());
+#endif
Register temp_0 = kScratchReg;
Register temp_1 = r0;
MemOperand src = g.ToMemOperand(source);
@@ -1333,6 +1424,13 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
}
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+ for (size_t index = 0; index < target_count; ++index) {
+ __ emit_label_addr(targets[index]);
+ }
+}
+
+
void CodeGenerator::AddNopForSmiCodeInlining() {
// We do not insert nops for inlined Smi code.
}
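
The ASSEMBLE_FLOAT_MAX/MIN macros above lean on PPC's fsel, which selects its second register operand when the first is >= 0; feeding it a - b turns sign selection into max/min. A C-level model of the two macros (note: unlike IEEE fmax/fmin, NaN and signed-zero inputs simply fall out of the subtraction):

    #include <cstdio>

    // fsel rd, c, a, b  =>  rd = (c >= 0.0) ? a : b
    static double fsel(double c, double a, double b) {
      return c >= 0.0 ? a : b;
    }

    // Mirrors ASSEMBLE_FLOAT_MAX: scratch = a - b, then select.
    double Float64Max(double a, double b) { return fsel(a - b, a, b); }
    // Mirrors ASSEMBLE_FLOAT_MIN: same scratch, operands swapped.
    double Float64Min(double a, double b) { return fsel(a - b, b, a); }

    int main() {
      std::printf("%g %g\n", Float64Max(1.5, 2.5), Float64Min(1.5, 2.5));
    }
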
diff --git a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
index 715a904fef..bb0a77117d 100644
--- a/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/ppc/instruction-codes-ppc.h
@@ -67,6 +67,9 @@ namespace compiler {
V(PPC_CeilFloat64) \
V(PPC_TruncateFloat64) \
V(PPC_RoundFloat64) \
+ V(PPC_MaxFloat64) \
+ V(PPC_MinFloat64) \
+ V(PPC_Cntlz32) \
V(PPC_Cmp32) \
V(PPC_Cmp64) \
V(PPC_CmpFloat64) \
@@ -84,6 +87,11 @@ namespace compiler {
V(PPC_Float64ToInt32) \
V(PPC_Float64ToUint32) \
V(PPC_Float64ToFloat32) \
+ V(PPC_Float64ExtractLowWord32) \
+ V(PPC_Float64ExtractHighWord32) \
+ V(PPC_Float64InsertLowWord32) \
+ V(PPC_Float64InsertHighWord32) \
+ V(PPC_Float64Construct) \
V(PPC_LoadWordS8) \
V(PPC_LoadWordU8) \
V(PPC_LoadWordS16) \
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 6d39df6538..ae4c97a9ee 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -132,9 +132,8 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -737,6 +736,12 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
#endif
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Cntlz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
}
@@ -925,6 +930,21 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
// TODO(mbrandy): detect multiply-subtract
+ PPCOperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ // -floor(-x) = ceil(x)
+ Emit(kPPC_CeilFloat64, g.DefineAsRegister(node),
+ g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
VisitRRRFloat64(this, node, kPPC_SubFloat64);
}
@@ -948,18 +968,23 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
- VisitRRFloat64(this, kPPC_SqrtFloat64, node);
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ VisitRRRFloat64(this, node, kPPC_MaxFloat64);
}
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- VisitRRFloat64(this, kPPC_FloorFloat64, node);
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ VisitRRRFloat64(this, node, kPPC_MinFloat64);
}
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- VisitRRFloat64(this, kPPC_CeilFloat64, node);
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRRFloat64(this, kPPC_SqrtFloat64, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kPPC_FloorFloat64, node);
}
@@ -1020,8 +1045,7 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -1111,19 +1135,9 @@ static void VisitWordCompareZero(InstructionSelector* selector, Node* user,
cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
return VisitWord32Compare(selector, value, cont);
#if V8_TARGET_ARCH_PPC64
- case IrOpcode::kWord64Equal: {
- // Combine with comparisons against 0 by simply inverting the
- // continuation.
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont->Negate();
- continue;
- }
+ case IrOpcode::kWord64Equal:
cont->OverwriteAndNegateIfEqual(kEqual);
return VisitWord64Compare(selector, value, cont);
- }
case IrOpcode::kInt64LessThan:
cont->OverwriteAndNegateIfEqual(kSignedLessThan);
return VisitWord64Compare(selector, value, cont);
@@ -1235,6 +1249,34 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ PPCOperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value) {
+ index_operand = g.TempRegister();
+ Emit(kPPC_Sub32, index_operand, value_operand,
+ g.TempImmediate(sw.min_value));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+
+ // Generate a sequence of conditional jumps.
+ return EmitLookupSwitch(sw, value_operand);
+}
+
+
void InstructionSelector::VisitWord32Equal(Node* const node) {
FlagsContinuation cont(kEqual, node);
Int32BinopMatcher m(node);
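
The new VisitSwitch weighs a jump table against a chain of compares, scoring each form as its size plus three times its dispatch cost, and additionally requires the minimum case value to be above INT32_MIN so the index can be rebased by subtraction. The same decision extracted into a standalone sketch, with the selector's constants but none of its API (illustrative only):

    #include <cstddef>
    #include <cstdint>
    #include <limits>

    bool ShouldUseTableSwitch(size_t case_count, size_t value_range,
                              int32_t min_value) {
      size_t table_space_cost = 4 + value_range;      // table entries plus setup
      size_t table_time_cost = 3;                     // bounds check + indexed jump
      size_t lookup_space_cost = 3 + 2 * case_count;  // a compare/branch per case
      size_t lookup_time_cost = case_count;           // linear scan of the cases
      return case_count > 0 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost &&
             min_value > std::numeric_limits<int32_t>::min();
    }
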
@@ -1317,7 +1359,7 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
PPCOperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
@@ -1342,6 +1384,13 @@ void InstructionSelector::VisitCall(Node* node) {
Emit(kPPC_Push, g.NoOutput(), g.UseRegister(*i));
}
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler != nullptr) {
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -1356,7 +1405,7 @@ void InstructionSelector::VisitCall(Node* node) {
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(flags);
// Emit the call instruction.
InstructionOperand* first_output =
@@ -1368,11 +1417,58 @@ void InstructionSelector::VisitCall(Node* node) {
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float64ExtractLowWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ PPCOperandGenerator g(this);
+ Emit(kPPC_Float64ExtractHighWord32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ PPCOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertHighWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(left),
+ g.UseRegister(right));
+ return;
+ }
+ Emit(kPPC_Float64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ PPCOperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (left->opcode() == IrOpcode::kFloat64InsertLowWord32 &&
+ CanCover(node, left)) {
+ left = left->InputAt(1);
+ Emit(kPPC_Float64Construct, g.DefineAsRegister(node), g.UseRegister(right),
+ g.UseRegister(left));
+ return;
+ }
+ Emit(kPPC_Float64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
+ return MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kFloat64RoundDown |
MachineOperatorBuilder::kFloat64RoundTruncate |
MachineOperatorBuilder::kFloat64RoundTiesAway;
// We omit kWord32ShiftIsSafe as s[rl]w use 0x3f as a mask rather than 0x1f.
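
The two insert-word visitors above cooperate: when one insert feeds the other, both 32-bit halves of the double are freshly written, the original float input is dead, and the pair collapses into a single kPPC_Float64Construct taking (high, low) operands. A bit-level sketch of the semantics involved, where "low" means the least significant 32 bits of the IEEE-754 pattern (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    double InsertLowWord32(double d, uint32_t lo) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bits = (bits & 0xFFFFFFFF00000000ull) | lo;  // replace low 32 bits
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    double InsertHighWord32(double d, uint32_t hi) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bits = (bits & 0x00000000FFFFFFFFull) |
             (static_cast<uint64_t>(hi) << 32);  // replace high 32 bits
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    // When both halves are freshly inserted, the incoming double is irrelevant
    // and the chain collapses to building the value from the two words:
    double Construct(uint32_t hi, uint32_t lo) {
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    int main() {
      double chained = InsertHighWord32(InsertLowWord32(0.0, 0x33333333u),
                                        0x40045555u);
      double direct = Construct(0x40045555u, 0x33333333u);
      uint64_t a, b;
      std::memcpy(&a, &chained, sizeof a);
      std::memcpy(&b, &direct, sizeof b);
      assert(a == b);  // the combined form is bit-identical
      return 0;
    }
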
diff --git a/deps/v8/src/compiler/ppc/linkage-ppc.cc b/deps/v8/src/compiler/ppc/linkage-ppc.cc
index 38117222a9..39ebb63efa 100644
--- a/deps/v8/src/compiler/ppc/linkage-ppc.cc
+++ b/deps/v8/src/compiler/ppc/linkage-ppc.cc
@@ -53,9 +53,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties);
+ stack_parameter_count, flags, properties,
+ return_type);
}
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 04b1dc6f57..bcc96803f7 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -90,6 +90,9 @@ class RawMachineAssembler : public GraphBuilder {
Unique<HeapObject> val = Unique<HeapObject>::CreateUninitialized(object);
return NewNode(common()->HeapConstant(val));
}
+ Node* ExternalConstant(ExternalReference address) {
+ return NewNode(common()->ExternalConstant(address));
+ }
Node* Projection(int index, Node* a) {
return NewNode(common()->Projection(index), a);
@@ -97,14 +100,14 @@ class RawMachineAssembler : public GraphBuilder {
// Memory Operations.
Node* Load(MachineType rep, Node* base) {
- return Load(rep, base, Int32Constant(0));
+ return Load(rep, base, IntPtrConstant(0));
}
Node* Load(MachineType rep, Node* base, Node* index) {
return NewNode(machine()->Load(rep), base, index, graph()->start(),
graph()->start());
}
void Store(MachineType rep, Node* base, Node* value) {
- Store(rep, base, Int32Constant(0), value);
+ Store(rep, base, IntPtrConstant(0), value);
}
void Store(MachineType rep, Node* base, Node* index, Node* value) {
NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
@@ -172,6 +175,7 @@ class RawMachineAssembler : public GraphBuilder {
Node* Word32Ror(Node* a, Node* b) {
return NewNode(machine()->Word32Ror(), a, b);
}
+ Node* Word32Clz(Node* a) { return NewNode(machine()->Word32Clz(), a); }
Node* Word32Equal(Node* a, Node* b) {
return NewNode(machine()->Word32Equal(), a, b);
}
@@ -281,6 +285,9 @@ class RawMachineAssembler : public GraphBuilder {
Node* Int64LessThan(Node* a, Node* b) {
return NewNode(machine()->Int64LessThan(), a, b);
}
+ Node* Uint64LessThan(Node* a, Node* b) {
+ return NewNode(machine()->Uint64LessThan(), a, b);
+ }
Node* Int64LessThanOrEqual(Node* a, Node* b) {
return NewNode(machine()->Int64LessThanOrEqual(), a, b);
}
@@ -384,8 +391,9 @@ class RawMachineAssembler : public GraphBuilder {
Node* TruncateInt64ToInt32(Node* a) {
return NewNode(machine()->TruncateInt64ToInt32(), a);
}
- Node* Float64Floor(Node* a) { return NewNode(machine()->Float64Floor(), a); }
- Node* Float64Ceil(Node* a) { return NewNode(machine()->Float64Ceil(), a); }
+ Node* Float64RoundDown(Node* a) {
+ return NewNode(machine()->Float64RoundDown(), a);
+ }
Node* Float64RoundTruncate(Node* a) {
return NewNode(machine()->Float64RoundTruncate(), a);
}
@@ -393,6 +401,23 @@ class RawMachineAssembler : public GraphBuilder {
return NewNode(machine()->Float64RoundTiesAway(), a);
}
+ // Float64 bit operations.
+ Node* Float64ExtractLowWord32(Node* a) {
+ return NewNode(machine()->Float64ExtractLowWord32(), a);
+ }
+ Node* Float64ExtractHighWord32(Node* a) {
+ return NewNode(machine()->Float64ExtractHighWord32(), a);
+ }
+ Node* Float64InsertLowWord32(Node* a, Node* b) {
+ return NewNode(machine()->Float64InsertLowWord32(), a, b);
+ }
+ Node* Float64InsertHighWord32(Node* a, Node* b) {
+ return NewNode(machine()->Float64InsertHighWord32(), a, b);
+ }
+
+ // Stack operations.
+ Node* LoadStackPointer() { return NewNode(machine()->LoadStackPointer()); }
+
// Parameters.
Node* Parameter(size_t index);
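
The new Float64 bit-operation wrappers build graph nodes rather than compute values, but the operations they name have simple bit-pattern semantics: the extracts read the least and most significant 32 bits of the double's IEEE-754 representation, mirroring the inserts added to the PPC selector above. A portable sketch of those semantics (an assumed illustration, not the RawMachineAssembler API):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    uint32_t Float64ExtractLowWord32(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<uint32_t>(bits);  // least significant 32 bits
    }

    uint32_t Float64ExtractHighWord32(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<uint32_t>(bits >> 32);  // sign, exponent, high mantissa
    }

    int main() {
      double d = 2.5;  // bit pattern 0x4004000000000000
      assert(Float64ExtractHighWord32(d) == 0x40040000u);
      assert(Float64ExtractLowWord32(d) == 0u);
      return 0;
    }
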
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
index 434e965bf3..c57591dfb4 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.cc
+++ b/deps/v8/src/compiler/register-allocator-verifier.cc
@@ -162,6 +162,13 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->type_ = kRegister;
}
break;
+ case UnallocatedOperand::MUST_HAVE_SLOT:
+ if (sequence()->IsDouble(vreg)) {
+ constraint->type_ = kDoubleSlot;
+ } else {
+ constraint->type_ = kSlot;
+ }
+ break;
case UnallocatedOperand::SAME_AS_FIRST_INPUT:
constraint->type_ = kSameAsFirst;
break;
@@ -200,6 +207,12 @@ void RegisterAllocatorVerifier::CheckConstraint(
CHECK(op->IsStackSlot());
CHECK_EQ(op->index(), constraint->value_);
return;
+ case kSlot:
+ CHECK(op->IsStackSlot());
+ return;
+ case kDoubleSlot:
+ CHECK(op->IsDoubleStackSlot());
+ return;
case kNone:
CHECK(op->IsRegister() || op->IsStackSlot());
return;
@@ -214,7 +227,7 @@ void RegisterAllocatorVerifier::CheckConstraint(
namespace {
-typedef BasicBlock::RpoNumber Rpo;
+typedef RpoNumber Rpo;
static const int kInvalidVreg = InstructionOperand::kInvalidVirtualRegister;
@@ -245,8 +258,7 @@ class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
struct OperandLess {
bool operator()(const InstructionOperand* a,
const InstructionOperand* b) const {
- if (a->kind() == b->kind()) return a->index() < b->index();
- return a->kind() < b->kind();
+ return *a < *b;
}
};
@@ -302,7 +314,10 @@ class OperandMap : public ZoneObject {
if (i->IsEliminated()) continue;
auto cur = map().find(i->source());
CHECK(cur != map().end());
- to_insert.insert(std::make_pair(i->destination(), cur->second));
+ auto res =
+ to_insert.insert(std::make_pair(i->destination(), cur->second));
+ // Ensure injectivity of moves.
+ CHECK(res.second);
}
// Drop current mappings.
for (auto i = moves->begin(); i != moves->end(); ++i) {
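
The strengthened verifier relies on std::map::insert reporting, via the bool in its return value, whether the key was already present; a second move targeting the same destination operand therefore trips the CHECK. A minimal illustration of that mechanism:

    #include <cassert>
    #include <map>
    #include <string>

    int main() {
      std::map<std::string, int> to_insert;  // destination -> source value
      auto first = to_insert.insert({"r3", 1});
      assert(first.second);                  // new destination: accepted
      auto duplicate = to_insert.insert({"r3", 2});
      assert(!duplicate.second);             // same destination twice: rejected
      return 0;
    }
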
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
index 86fda1670d..074e2998ce 100644
--- a/deps/v8/src/compiler/register-allocator-verifier.h
+++ b/deps/v8/src/compiler/register-allocator-verifier.h
@@ -30,6 +30,8 @@ class RegisterAllocatorVerifier FINAL : public ZoneObject {
kFixedRegister,
kDoubleRegister,
kFixedDoubleRegister,
+ kSlot,
+ kDoubleSlot,
kFixedSlot,
kNone,
kNoneDouble,
diff --git a/deps/v8/src/compiler/register-allocator.cc b/deps/v8/src/compiler/register-allocator.cc
index 1de5773e7f..20fae0cbce 100644
--- a/deps/v8/src/compiler/register-allocator.cc
+++ b/deps/v8/src/compiler/register-allocator.cc
@@ -10,6 +10,11 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \
+ } while (false)
+
static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
return a.Value() < b.Value() ? a : b;
}
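
The TRACE macro replaces the variadic TraceAlloc helper removed just below; the do { ... } while (false) wrapper makes the expansion a single statement that still demands a trailing semicolon, so it nests safely under unbraced if/else. A self-contained sketch of why that matters:

    #include <cstdio>

    static bool trace_enabled = true;

    // With a bare { ... } block, `if (c) MACRO(); else ...` breaks: the ';'
    // after the block terminates the if before the else arrives. The
    // do/while(false) form is exactly one statement, so it parses as intended.
    #define TRACE(...)                          \
      do {                                      \
        if (trace_enabled) printf(__VA_ARGS__); \
      } while (false)

    int main() {
      bool verbose = true;
      if (verbose)
        TRACE("tracing is on\n");
      else
        TRACE("tracing is off\n");
      return 0;
    }
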
@@ -20,16 +25,6 @@ static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
}
-static void TraceAlloc(const char* msg, ...) {
- if (FLAG_trace_alloc) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
-
static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
auto it = std::find(v->begin(), v->end(), range);
DCHECK(it != v->end());
@@ -39,17 +34,22 @@ static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
InstructionOperand* hint)
- : operand_(operand),
- hint_(hint),
- pos_(pos),
- next_(nullptr),
- requires_reg_(false),
- register_beneficial_(true) {
+ : operand_(operand), hint_(hint), pos_(pos), next_(nullptr), flags_(0) {
+ bool register_beneficial = true;
+ UsePositionType type = UsePositionType::kAny;
if (operand_ != nullptr && operand_->IsUnallocated()) {
const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
- requires_reg_ = unalloc->HasRegisterPolicy();
- register_beneficial_ = !unalloc->HasAnyPolicy();
+ if (unalloc->HasRegisterPolicy()) {
+ type = UsePositionType::kRequiresRegister;
+ } else if (unalloc->HasSlotPolicy()) {
+ type = UsePositionType::kRequiresSlot;
+ register_beneficial = false;
+ } else {
+ register_beneficial = !unalloc->HasAnyPolicy();
+ }
}
+ flags_ = TypeField::encode(type) |
+ RegisterBeneficialField::encode(register_beneficial);
DCHECK(pos_.IsValid());
}
@@ -59,10 +59,11 @@ bool UsePosition::HasHint() const {
}
-bool UsePosition::RequiresRegister() const { return requires_reg_; }
-
-
-bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
+void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
+ DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
+ flags_ = TypeField::encode(type) |
+ RegisterBeneficialField::encode(register_beneficial);
+}
void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
@@ -117,6 +118,7 @@ bool LiveRange::HasOverlap(UseInterval* target) const {
LiveRange::LiveRange(int id, Zone* zone)
: id_(id),
spilled_(false),
+ has_slot_use_(false),
is_phi_(false),
is_non_loop_phi_(false),
kind_(UNALLOCATED_REGISTERS),
@@ -140,7 +142,7 @@ void LiveRange::set_assigned_register(int reg,
DCHECK(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
// TODO(dcarney): stop aliasing hint operands.
- ConvertUsesToOperand(GetAssignedOperand(operand_cache));
+ ConvertUsesToOperand(GetAssignedOperand(operand_cache), nullptr);
}
@@ -161,16 +163,32 @@ void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
- InstructionOperand* op) {
- auto to_spill = TopLevel()->spills_at_definition_;
- if (to_spill == nullptr) return;
+ InstructionOperand* op,
+ bool might_be_duplicated) {
+ DCHECK(!IsChild());
auto zone = sequence->zone();
- for (; to_spill != nullptr; to_spill = to_spill->next) {
+ for (auto to_spill = spills_at_definition_; to_spill != nullptr;
+ to_spill = to_spill->next) {
auto gap = sequence->GapAt(to_spill->gap_index);
auto move = gap->GetOrCreateParallelMove(GapInstruction::START, zone);
+ // Skip insertion if it's possible that the move exists already as a
+ // constraint move from a fixed output register to a slot.
+ if (might_be_duplicated) {
+ bool found = false;
+ auto move_ops = move->move_operands();
+ for (auto move_op = move_ops->begin(); move_op != move_ops->end();
+ ++move_op) {
+ if (move_op->IsEliminated()) continue;
+ if (move_op->source()->Equals(to_spill->operand) &&
+ move_op->destination()->Equals(op)) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ }
move->AddMove(to_spill->operand, op, zone);
}
- TopLevel()->spills_at_definition_ = nullptr;
}
@@ -234,7 +252,7 @@ UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
UsePosition* pos = NextUsePosition(start);
- while (pos != nullptr && !pos->RequiresRegister()) {
+ while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
pos = pos->next();
}
return pos;
@@ -424,7 +442,7 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
void LiveRange::ShortenTo(LifetimePosition start) {
- TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
+ TRACE("Shorten live range %d to [%d\n", id_, start.Value());
DCHECK(first_interval_ != nullptr);
DCHECK(first_interval_->start().Value() <= start.Value());
DCHECK(start.Value() < first_interval_->end().Value());
@@ -434,8 +452,8 @@ void LiveRange::ShortenTo(LifetimePosition start) {
void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
Zone* zone) {
- TraceAlloc("Ensure live range %d in interval [%d %d[\n", id_, start.Value(),
- end.Value());
+ TRACE("Ensure live range %d in interval [%d %d[\n", id_, start.Value(),
+ end.Value());
auto new_end = end;
while (first_interval_ != nullptr &&
first_interval_->start().Value() <= end.Value()) {
@@ -456,8 +474,8 @@ void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
Zone* zone) {
- TraceAlloc("Add to live range %d interval [%d %d[\n", id_, start.Value(),
- end.Value());
+ TRACE("Add to live range %d interval [%d %d[\n", id_, start.Value(),
+ end.Value());
if (first_interval_ == nullptr) {
auto interval = new (zone) UseInterval(start, end);
first_interval_ = interval;
@@ -484,7 +502,7 @@ void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
void LiveRange::AddUsePosition(LifetimePosition pos,
InstructionOperand* operand,
InstructionOperand* hint, Zone* zone) {
- TraceAlloc("Add to live range %d use position %d\n", id_, pos.Value());
+ TRACE("Add to live range %d use position %d\n", id_, pos.Value());
auto use_pos = new (zone) UsePosition(pos, operand, hint);
UsePosition* prev_hint = nullptr;
UsePosition* prev = nullptr;
@@ -509,18 +527,27 @@ void LiveRange::AddUsePosition(LifetimePosition pos,
}
-void LiveRange::ConvertUsesToOperand(InstructionOperand* op) {
- auto use_pos = first_pos();
- while (use_pos != nullptr) {
- DCHECK(Start().Value() <= use_pos->pos().Value() &&
- use_pos->pos().Value() <= End().Value());
-
- if (use_pos->HasOperand()) {
- DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
- !use_pos->RequiresRegister());
- use_pos->operand()->ConvertTo(op->kind(), op->index());
+void LiveRange::ConvertUsesToOperand(InstructionOperand* op,
+ InstructionOperand* spill_op) {
+ for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
+ DCHECK(Start().Value() <= pos->pos().Value() &&
+ pos->pos().Value() <= End().Value());
+ if (!pos->HasOperand()) {
+ continue;
+ }
+ switch (pos->type()) {
+ case UsePositionType::kRequiresSlot:
+ if (spill_op != nullptr) {
+ pos->operand()->ConvertTo(spill_op->kind(), spill_op->index());
+ }
+ break;
+ case UsePositionType::kRequiresRegister:
+ DCHECK(op->IsRegister() || op->IsDoubleRegister());
+ // Fall through.
+ case UsePositionType::kAny:
+ pos->operand()->ConvertTo(op->kind(), op->index());
+ break;
}
- use_pos = use_pos->next();
}
}
@@ -679,7 +706,7 @@ int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
InstructionOperand* RegisterAllocator::AllocateFixed(
UnallocatedOperand* operand, int pos, bool is_tagged) {
- TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
+ TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
DCHECK(operand->HasFixedPolicy());
if (operand->HasFixedSlotPolicy()) {
operand->ConvertTo(InstructionOperand::STACK_SLOT,
@@ -694,7 +721,7 @@ InstructionOperand* RegisterAllocator::AllocateFixed(
UNREACHABLE();
}
if (is_tagged) {
- TraceAlloc("Fixed reg is tagged at %d\n", pos);
+ TRACE("Fixed reg is tagged at %d\n", pos);
auto instr = InstructionAt(pos);
if (instr->HasPointerMap()) {
instr->pointer_map()->RecordPointer(operand, code_zone());
@@ -704,15 +731,19 @@ InstructionOperand* RegisterAllocator::AllocateFixed(
}
+LiveRange* RegisterAllocator::NewLiveRange(int index) {
+ // The LiveRange object itself can go in the local zone, but the
+ // InstructionOperand needs to go in the code zone, since it may survive
+ // register allocation.
+ return new (local_zone()) LiveRange(index, code_zone());
+}
+
+
LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
DCHECK(index < config()->num_general_registers());
auto result = fixed_live_ranges()[index];
if (result == nullptr) {
- // TODO(titzer): add a utility method to allocate a new LiveRange:
- // The LiveRange object itself can go in this zone, but the
- // InstructionOperand needs
- // to go in the code zone, since it may survive register allocation.
- result = new (local_zone()) LiveRange(FixedLiveRangeID(index), code_zone());
+ result = NewLiveRange(FixedLiveRangeID(index));
DCHECK(result->IsFixed());
result->kind_ = GENERAL_REGISTERS;
SetLiveRangeAssignedRegister(result, index);
@@ -726,8 +757,7 @@ LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
DCHECK(index < config()->num_aliased_double_registers());
auto result = fixed_double_live_ranges()[index];
if (result == nullptr) {
- result = new (local_zone())
- LiveRange(FixedDoubleLiveRangeID(index), code_zone());
+ result = NewLiveRange(FixedDoubleLiveRangeID(index));
DCHECK(result->IsFixed());
result->kind_ = DOUBLE_REGISTERS;
SetLiveRangeAssignedRegister(result, index);
@@ -743,7 +773,7 @@ LiveRange* RegisterAllocator::LiveRangeFor(int index) {
}
auto result = live_ranges()[index];
if (result == nullptr) {
- result = new (local_zone()) LiveRange(index, code_zone());
+ result = NewLiveRange(index);
live_ranges()[index] = result;
}
return result;
@@ -957,12 +987,15 @@ void RegisterAllocator::AssignSpillSlots() {
void RegisterAllocator::CommitAssignment() {
for (auto range : live_ranges()) {
if (range == nullptr || range->IsEmpty()) continue;
- // Register assignments were committed in set_assigned_register.
- if (range->HasRegisterAssigned()) continue;
auto assigned = range->GetAssignedOperand(operand_cache());
- range->ConvertUsesToOperand(assigned);
- if (range->IsSpilled()) {
- range->CommitSpillsAtDefinition(code(), assigned);
+ InstructionOperand* spill_operand = nullptr;
+ if (!range->TopLevel()->HasNoSpillType()) {
+ spill_operand = range->TopLevel()->GetSpillOperand();
+ }
+ range->ConvertUsesToOperand(assigned, spill_operand);
+ if (!range->IsChild() && spill_operand != nullptr) {
+ range->CommitSpillsAtDefinition(code(), spill_operand,
+ range->has_slot_use());
}
}
}
@@ -977,7 +1010,7 @@ SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
if (range->IsChild() || !range->is_phi()) return false;
- DCHECK(range->HasNoSpillType());
+ DCHECK(!range->HasSpillOperand());
auto lookup = phi_map_.find(range->id());
DCHECK(lookup != phi_map_.end());
@@ -1040,12 +1073,16 @@ bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
}
auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
if (pos == nullptr) {
- auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+ auto spill_range = range->TopLevel()->HasSpillRange()
+ ? range->TopLevel()->GetSpillRange()
+ : AssignSpillRangeToLiveRange(range->TopLevel());
CHECK(first_op_spill->TryMerge(spill_range));
Spill(range);
return true;
} else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
- auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+ auto spill_range = range->TopLevel()->HasSpillRange()
+ ? range->TopLevel()->GetSpillRange()
+ : AssignSpillRangeToLiveRange(range->TopLevel());
CHECK(first_op_spill->TryMerge(spill_range));
SpillBetween(range, range->Start(), pos->pos());
DCHECK(UnhandledIsSorted());
@@ -1304,6 +1341,8 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
for (size_t i = 0; i < instr->OutputCount(); i++) {
auto output = instr->OutputAt(i);
if (output->IsUnallocated()) {
+ // Unsupported.
+ DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
live->Remove(out_vreg);
} else if (output->IsConstant()) {
@@ -1344,14 +1383,22 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
use_pos = curr_position.InstructionEnd();
}
- Use(block_start_position, use_pos, input, nullptr);
if (input->IsUnallocated()) {
- live->Add(UnallocatedOperand::cast(input)->virtual_register());
+ UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
+ int vreg = unalloc->virtual_register();
+ live->Add(vreg);
+ if (unalloc->HasSlotPolicy()) {
+ LiveRangeFor(vreg)->set_has_slot_use(true);
+ }
}
+ Use(block_start_position, use_pos, input, nullptr);
}
for (size_t i = 0; i < instr->TempCount(); i++) {
auto temp = instr->TempAt(i);
+ // Unsupported.
+ DCHECK_IMPLIES(temp->IsUnallocated(),
+ !UnallocatedOperand::cast(temp)->HasSlotPolicy());
if (instr->ClobbersTemps()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
@@ -1415,22 +1462,6 @@ void RegisterAllocator::ResolvePhis() {
}
-ParallelMove* RegisterAllocator::GetConnectingParallelMove(
- LifetimePosition pos) {
- int index = pos.InstructionIndex();
- if (code()->IsGapAt(index)) {
- auto gap = code()->GapAt(index);
- return gap->GetOrCreateParallelMove(
- pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
- code_zone());
- }
- int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
- return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
- (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::START,
- code_zone());
-}
-
-
const InstructionBlock* RegisterAllocator::GetInstructionBlock(
LifetimePosition pos) {
return code()->GetInstructionBlock(pos.InstructionIndex());
@@ -1438,34 +1469,75 @@ const InstructionBlock* RegisterAllocator::GetInstructionBlock(
void RegisterAllocator::ConnectRanges() {
+ ZoneMap<std::pair<ParallelMove*, InstructionOperand*>, InstructionOperand*>
+ delayed_insertion_map(local_zone());
for (auto first_range : live_ranges()) {
if (first_range == nullptr || first_range->IsChild()) continue;
- auto second_range = first_range->next();
- while (second_range != nullptr) {
+ for (auto second_range = first_range->next(); second_range != nullptr;
+ first_range = second_range, second_range = second_range->next()) {
auto pos = second_range->Start();
- if (!second_range->IsSpilled()) {
- // Add gap move if the two live ranges touch and there is no block
- // boundary.
- if (first_range->End().Value() == pos.Value()) {
- bool should_insert = true;
- if (IsBlockBoundary(pos)) {
- should_insert =
- CanEagerlyResolveControlFlow(GetInstructionBlock(pos));
- }
- if (should_insert) {
- auto move = GetConnectingParallelMove(pos);
- auto prev_operand =
- first_range->GetAssignedOperand(operand_cache());
- auto cur_operand =
- second_range->GetAssignedOperand(operand_cache());
- move->AddMove(prev_operand, cur_operand, code_zone());
- }
- }
+ // Add gap move if the two live ranges touch and there is no block
+ // boundary.
+ if (second_range->IsSpilled()) continue;
+ if (first_range->End().Value() != pos.Value()) continue;
+ if (IsBlockBoundary(pos) &&
+ !CanEagerlyResolveControlFlow(GetInstructionBlock(pos))) {
+ continue;
+ }
+ auto prev_operand = first_range->GetAssignedOperand(operand_cache());
+ auto cur_operand = second_range->GetAssignedOperand(operand_cache());
+ if (prev_operand->Equals(cur_operand)) continue;
+ int index = pos.InstructionIndex();
+ bool delay_insertion = false;
+ GapInstruction::InnerPosition gap_pos;
+ int gap_index = index;
+ if (code()->IsGapAt(index)) {
+ gap_pos = pos.IsInstructionStart() ? GapInstruction::START
+ : GapInstruction::END;
+ } else {
+ gap_index = pos.IsInstructionStart() ? (index - 1) : (index + 1);
+ delay_insertion = gap_index < index;
+ gap_pos = delay_insertion ? GapInstruction::END : GapInstruction::START;
+ }
+ auto move = code()->GapAt(gap_index)->GetOrCreateParallelMove(
+ gap_pos, code_zone());
+ if (!delay_insertion) {
+ move->AddMove(prev_operand, cur_operand, code_zone());
+ } else {
+ delayed_insertion_map.insert(
+ std::make_pair(std::make_pair(move, prev_operand), cur_operand));
}
- first_range = second_range;
- second_range = second_range->next();
}
}
+ if (delayed_insertion_map.empty()) return;
+ // Insert all the moves which should occur after the stored move.
+ ZoneVector<MoveOperands> to_insert(local_zone());
+ ZoneVector<MoveOperands*> to_eliminate(local_zone());
+ to_insert.reserve(4);
+ to_eliminate.reserve(4);
+ auto move = delayed_insertion_map.begin()->first.first;
+ for (auto it = delayed_insertion_map.begin();; ++it) {
+ bool done = it == delayed_insertion_map.end();
+ if (done || it->first.first != move) {
+ // Commit the MoveOperands for current ParallelMove.
+ for (auto move_ops : to_eliminate) {
+ move_ops->Eliminate();
+ }
+ for (auto move_ops : to_insert) {
+ move->AddMove(move_ops.source(), move_ops.destination(), code_zone());
+ }
+ if (done) break;
+ // Reset state.
+ to_eliminate.clear();
+ to_insert.clear();
+ move = it->first.first;
+ }
+ // Gather all MoveOperands for a single ParallelMove.
+ MoveOperands move_ops(it->first.second, it->second);
+ auto eliminate = move->PrepareInsertAfter(&move_ops);
+ to_insert.push_back(move_ops);
+ if (eliminate != nullptr) to_eliminate.push_back(eliminate);
+ }
}
@@ -1681,8 +1753,7 @@ void RegisterAllocator::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
- auto block =
- code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(block_id));
+ auto block = code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
auto live = ComputeLiveOut(block);
// Initially consider all live_out values live for the entire block. We
// will shorten these intervals if necessary.
@@ -1749,20 +1820,25 @@ void RegisterAllocator::BuildLiveRanges() {
for (auto range : live_ranges()) {
if (range == nullptr) continue;
range->kind_ = RequiredRegisterKind(range->id());
+ // Give slots to all ranges with a non-fixed slot use.
+ if (range->has_slot_use() && range->HasNoSpillType()) {
+ AssignSpillRangeToLiveRange(range);
+ }
// TODO(bmeurer): This is a horrible hack to make sure that for constant
// live ranges, every use requires the constant to be in a register.
// Without this hack, all uses with "any" policy would get the constant
// operand assigned.
if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next_) {
- pos->register_beneficial_ = true;
- // TODO(dcarney): should the else case assert requires_reg_ == false?
+ if (pos->type() == UsePositionType::kRequiresSlot) continue;
+ UsePositionType new_type = UsePositionType::kAny;
// Can't mark phis as needing a register.
if (!code()
->InstructionAt(pos->pos().InstructionIndex())
->IsGapMoves()) {
- pos->requires_reg_ = true;
+ new_type = UsePositionType::kRequiresRegister;
}
+ pos->set_type(new_type, true);
}
}
}
@@ -1860,13 +1936,13 @@ void RegisterAllocator::PopulatePointerMaps() {
if (range->HasSpillOperand() &&
safe_point >= range->spill_start_index() &&
!range->GetSpillOperand()->IsConstant()) {
- TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
- range->id(), range->spill_start_index(), safe_point);
+ TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
+ range->id(), range->spill_start_index(), safe_point);
map->RecordPointer(range->GetSpillOperand(), code_zone());
}
if (!cur->IsSpilled()) {
- TraceAlloc(
+ TRACE(
"Pointer in register for range %d (start at %d) "
"at safe point %d\n",
cur->id(), cur->Start().Value(), safe_point);
@@ -1933,11 +2009,10 @@ void RegisterAllocator::AllocateRegisters() {
#ifdef DEBUG
allocation_finger_ = position;
#endif
- TraceAlloc("Processing interval %d start=%d\n", current->id(),
- position.Value());
+ TRACE("Processing interval %d start=%d\n", current->id(), position.Value());
if (!current->HasNoSpillType()) {
- TraceAlloc("Live range %d already has a spill operand\n", current->id());
+ TRACE("Live range %d already has a spill operand\n", current->id());
auto next_pos = position;
if (code()->IsGapAt(next_pos.InstructionIndex())) {
next_pos = next_pos.NextInstruction();
@@ -2018,13 +2093,13 @@ RegisterKind RegisterAllocator::RequiredRegisterKind(
void RegisterAllocator::AddToActive(LiveRange* range) {
- TraceAlloc("Add live range %d to active\n", range->id());
+ TRACE("Add live range %d to active\n", range->id());
active_live_ranges().push_back(range);
}
void RegisterAllocator::AddToInactive(LiveRange* range) {
- TraceAlloc("Add live range %d to inactive\n", range->id());
+ TRACE("Add live range %d to inactive\n", range->id());
inactive_live_ranges().push_back(range);
}
@@ -2037,13 +2112,13 @@ void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
--i) {
auto cur_range = unhandled_live_ranges().at(i);
if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
- TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+ TRACE("Add live range %d to unhandled at %d\n", range->id(), i + 1);
auto it = unhandled_live_ranges().begin() + (i + 1);
unhandled_live_ranges().insert(it, range);
DCHECK(UnhandledIsSorted());
return;
}
- TraceAlloc("Add live range %d to unhandled at start\n", range->id());
+ TRACE("Add live range %d to unhandled at start\n", range->id());
unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
DCHECK(UnhandledIsSorted());
}
@@ -2052,7 +2127,7 @@ void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
if (range == nullptr || range->IsEmpty()) return;
DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
- TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
+ TRACE("Add live range %d to unhandled unsorted at end\n", range->id());
unhandled_live_ranges().push_back(range);
}
@@ -2069,7 +2144,7 @@ static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
// at the end of the array list. This is convenient for the register allocation
// algorithm because it is efficient to remove elements from the end.
void RegisterAllocator::SortUnhandled() {
- TraceAlloc("Sort unhandled\n");
+ TRACE("Sort unhandled\n");
std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
&UnhandledSortHelper);
}
@@ -2088,27 +2163,27 @@ bool RegisterAllocator::UnhandledIsSorted() {
void RegisterAllocator::ActiveToHandled(LiveRange* range) {
RemoveElement(&active_live_ranges(), range);
- TraceAlloc("Moving live range %d from active to handled\n", range->id());
+ TRACE("Moving live range %d from active to handled\n", range->id());
}
void RegisterAllocator::ActiveToInactive(LiveRange* range) {
RemoveElement(&active_live_ranges(), range);
inactive_live_ranges().push_back(range);
- TraceAlloc("Moving live range %d from active to inactive\n", range->id());
+ TRACE("Moving live range %d from active to inactive\n", range->id());
}
void RegisterAllocator::InactiveToHandled(LiveRange* range) {
RemoveElement(&inactive_live_ranges(), range);
- TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
+ TRACE("Moving live range %d from inactive to handled\n", range->id());
}
void RegisterAllocator::InactiveToActive(LiveRange* range) {
RemoveElement(&inactive_live_ranges(), range);
active_live_ranges().push_back(range);
- TraceAlloc("Moving live range %d from inactive to active\n", range->id());
+ TRACE("Moving live range %d from inactive to active\n", range->id());
}
@@ -2135,15 +2210,14 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
auto hint = current->FirstHint();
if (hint != nullptr && (hint->IsRegister() || hint->IsDoubleRegister())) {
int register_index = hint->index();
- TraceAlloc(
- "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
- RegisterName(register_index), free_until_pos[register_index].Value(),
- current->id(), current->End().Value());
+ TRACE("Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+ RegisterName(register_index), free_until_pos[register_index].Value(),
+ current->id(), current->End().Value());
// The desired register is free until the end of the current live range.
if (free_until_pos[register_index].Value() >= current->End().Value()) {
- TraceAlloc("Assigning preferred reg %s to live range %d\n",
- RegisterName(register_index), current->id());
+ TRACE("Assigning preferred reg %s to live range %d\n",
+ RegisterName(register_index), current->id());
SetLiveRangeAssignedRegister(current, register_index);
return true;
}
@@ -2174,8 +2248,8 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
// Register reg is available at the range start and is free until
// the range end.
DCHECK(pos.Value() >= current->End().Value());
- TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
- current->id());
+ TRACE("Assigning free reg %s to live range %d\n", RegisterName(reg),
+ current->id());
SetLiveRangeAssignedRegister(current, reg);
return true;
@@ -2253,8 +2327,8 @@ void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
// Register reg is not blocked for the whole range.
DCHECK(block_pos[reg].Value() >= current->End().Value());
- TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
- current->id());
+ TRACE("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
+ current->id());
SetLiveRangeAssignedRegister(current, reg);
// This register was not free. Thus we need to find and spill
@@ -2362,14 +2436,15 @@ bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
LifetimePosition pos) {
DCHECK(!range->IsFixed());
- TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+ TRACE("Splitting live range %d at %d\n", range->id(), pos.Value());
if (pos.Value() <= range->Start().Value()) return range;
- // We can't properly connect liveranges if split occured at the end
- // of control instruction.
+ // We can't properly connect live ranges if splitting occurred at the end
+ // of a block.
DCHECK(pos.IsInstructionStart() ||
- !InstructionAt(pos.InstructionIndex())->IsControl());
+ (code()->GetInstructionBlock(pos.InstructionIndex()))
+ ->last_instruction_index() != pos.InstructionIndex());
int vreg = GetVirtualRegister();
auto result = LiveRangeFor(vreg);
@@ -2382,8 +2457,8 @@ LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
DCHECK(!range->IsFixed());
- TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
- range->id(), start.Value(), end.Value());
+ TRACE("Splitting live range %d in position between [%d, %d]\n", range->id(),
+ start.Value(), end.Value());
auto split_pos = FindOptimalSplitPos(start, end);
DCHECK(split_pos.Value() >= start.Value());
@@ -2472,7 +2547,7 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
void RegisterAllocator::Spill(LiveRange* range) {
DCHECK(!range->IsSpilled());
- TraceAlloc("Spilling live range %d\n", range->id());
+ TRACE("Spilling live range %d\n", range->id());
auto first = range->TopLevel();
if (first->HasNoSpillType()) {
AssignSpillRangeToLiveRange(first);
diff --git a/deps/v8/src/compiler/register-allocator.h b/deps/v8/src/compiler/register-allocator.h
index d7dd1b7358..9ee778d0bd 100644
--- a/deps/v8/src/compiler/register-allocator.h
+++ b/deps/v8/src/compiler/register-allocator.h
@@ -141,6 +141,9 @@ class UseInterval FINAL : public ZoneObject {
};
+enum class UsePositionType : uint8_t { kAny, kRequiresRegister, kRequiresSlot };
+
+
// Representation of a use position.
class UsePosition FINAL : public ZoneObject {
public:
@@ -152,22 +155,27 @@ class UsePosition FINAL : public ZoneObject {
InstructionOperand* hint() const { return hint_; }
bool HasHint() const;
- bool RequiresRegister() const;
- bool RegisterIsBeneficial() const;
+ bool RegisterIsBeneficial() const {
+ return RegisterBeneficialField::decode(flags_);
+ }
+ UsePositionType type() const { return TypeField::decode(flags_); }
LifetimePosition pos() const { return pos_; }
UsePosition* next() const { return next_; }
void set_next(UsePosition* next) { next_ = next; }
+ void set_type(UsePositionType type, bool register_beneficial);
InstructionOperand* const operand_;
InstructionOperand* const hint_;
LifetimePosition const pos_;
UsePosition* next_;
- bool requires_reg_ : 1;
- bool register_beneficial_ : 1;
private:
+ typedef BitField8<UsePositionType, 0, 2> TypeField;
+ typedef BitField8<bool, 2, 1> RegisterBeneficialField;
+ uint8_t flags_;
+
DISALLOW_COPY_AND_ASSIGN(UsePosition);
};
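
Replacing the two bool members with a packed flags_ byte shrinks UsePosition and leaves room for more states: TypeField occupies bits 0-1 and RegisterBeneficialField bit 2. A minimal sketch of the packing scheme (hand-rolled encode/decode standing in for V8's BitField8 template):

    #include <cassert>
    #include <cstdint>

    enum class UsePositionType : uint8_t { kAny, kRequiresRegister, kRequiresSlot };

    constexpr uint8_t EncodeFlags(UsePositionType type, bool register_beneficial) {
      return static_cast<uint8_t>(type) |               // bits 0-1: the type
             (static_cast<uint8_t>(register_beneficial) << 2);  // bit 2: the flag
    }

    constexpr UsePositionType DecodeType(uint8_t flags) {
      return static_cast<UsePositionType>(flags & 0x3);
    }

    constexpr bool DecodeRegisterBeneficial(uint8_t flags) {
      return ((flags >> 2) & 0x1) != 0;
    }

    int main() {
      uint8_t flags = EncodeFlags(UsePositionType::kRequiresSlot, false);
      assert(DecodeType(flags) == UsePositionType::kRequiresSlot);
      assert(!DecodeRegisterBeneficial(flags));
      return 0;
    }
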
@@ -233,6 +241,8 @@ class LiveRange FINAL : public ZoneObject {
void set_is_non_loop_phi(bool is_non_loop_phi) {
is_non_loop_phi_ = is_non_loop_phi;
}
+ bool has_slot_use() const { return has_slot_use_; }
+ void set_has_slot_use(bool has_slot_use) { has_slot_use_ = has_slot_use; }
// Returns use position in this live range that follows both start
// and last processed use position.
@@ -309,7 +319,8 @@ class LiveRange FINAL : public ZoneObject {
void SetSpillRange(SpillRange* spill_range);
void CommitSpillOperand(InstructionOperand* operand);
void CommitSpillsAtDefinition(InstructionSequence* sequence,
- InstructionOperand* operand);
+ InstructionOperand* operand,
+ bool might_be_duplicated);
void SetSpillStartIndex(int start) {
spill_start_index_ = Min(start, spill_start_index_);
@@ -338,16 +349,18 @@ class LiveRange FINAL : public ZoneObject {
private:
struct SpillAtDefinitionList;
- void ConvertUsesToOperand(InstructionOperand* op);
+ void ConvertUsesToOperand(InstructionOperand* op,
+ InstructionOperand* spill_op);
UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
void AdvanceLastProcessedMarker(UseInterval* to_start_of,
LifetimePosition but_not_past) const;
// TODO(dcarney): pack this structure better.
int id_;
- bool spilled_;
- bool is_phi_;
- bool is_non_loop_phi_;
+ bool spilled_ : 1;
+ bool has_slot_use_ : 1; // Relevant only for parent.
+ bool is_phi_ : 1;
+ bool is_non_loop_phi_ : 1;
RegisterKind kind_;
int assigned_register_;
UseInterval* last_interval_;
@@ -457,6 +470,9 @@ class RegisterAllocator FINAL : public ZoneObject {
// Returns the register kind required by the given virtual register.
RegisterKind RequiredRegisterKind(int virtual_register) const;
+ // Creates a new live range.
+ LiveRange* NewLiveRange(int index);
+
// This zone is for InstructionOperands and moves that live beyond register
// allocation.
Zone* code_zone() const { return code()->zone(); }
@@ -564,10 +580,6 @@ class RegisterAllocator FINAL : public ZoneObject {
void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
- // Return parallel move that should be used to connect ranges split at the
- // given position.
- ParallelMove* GetConnectingParallelMove(LifetimePosition pos);
-
// Return the block which contains give lifetime position.
const InstructionBlock* GetInstructionBlock(LifetimePosition pos);
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index 8924ae5440..f30e5f61ce 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -100,10 +100,14 @@ std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
return os << "none";
case BasicBlock::kGoto:
return os << "goto";
+ case BasicBlock::kCall:
+ return os << "call";
case BasicBlock::kBranch:
return os << "branch";
case BasicBlock::kSwitch:
return os << "switch";
+ case BasicBlock::kDeoptimize:
+ return os << "deoptimize";
case BasicBlock::kReturn:
return os << "return";
case BasicBlock::kThrow:
@@ -119,11 +123,6 @@ std::ostream& operator<<(std::ostream& os, const BasicBlock::Id& id) {
}
-std::ostream& operator<<(std::ostream& os, const BasicBlock::RpoNumber& rpo) {
- return os << rpo.ToSize();
-}
-
-
Schedule::Schedule(Zone* zone, size_t node_count_hint)
: zone_(zone),
all_blocks_(zone),
@@ -194,16 +193,27 @@ void Schedule::AddNode(BasicBlock* block, Node* node) {
void Schedule::AddGoto(BasicBlock* block, BasicBlock* succ) {
- DCHECK(block->control() == BasicBlock::kNone);
+ DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kGoto);
AddSuccessor(block, succ);
}
+void Schedule::AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
+ BasicBlock* exception_block) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(IrOpcode::kCall, call->opcode());
+ block->set_control(BasicBlock::kCall);
+ AddSuccessor(block, success_block);
+ AddSuccessor(block, exception_block);
+ SetControlInput(block, call);
+}
+
+
void Schedule::AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock) {
- DCHECK(block->control() == BasicBlock::kNone);
- DCHECK(branch->opcode() == IrOpcode::kBranch);
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ DCHECK_EQ(IrOpcode::kBranch, branch->opcode());
block->set_control(BasicBlock::kBranch);
AddSuccessor(block, tblock);
AddSuccessor(block, fblock);
@@ -224,15 +234,23 @@ void Schedule::AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
void Schedule::AddReturn(BasicBlock* block, Node* input) {
- DCHECK(block->control() == BasicBlock::kNone);
+ DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kReturn);
SetControlInput(block, input);
if (block != end()) AddSuccessor(block, end());
}
+void Schedule::AddDeoptimize(BasicBlock* block, Node* input) {
+ DCHECK_EQ(BasicBlock::kNone, block->control());
+ block->set_control(BasicBlock::kDeoptimize);
+ SetControlInput(block, input);
+ if (block != end()) AddSuccessor(block, end());
+}
+
+
void Schedule::AddThrow(BasicBlock* block, Node* input) {
- DCHECK(block->control() == BasicBlock::kNone);
+ DCHECK_EQ(BasicBlock::kNone, block->control());
block->set_control(BasicBlock::kThrow);
SetControlInput(block, input);
if (block != end()) AddSuccessor(block, end());
@@ -241,8 +259,8 @@ void Schedule::AddThrow(BasicBlock* block, Node* input) {
void Schedule::InsertBranch(BasicBlock* block, BasicBlock* end, Node* branch,
BasicBlock* tblock, BasicBlock* fblock) {
- DCHECK(block->control() != BasicBlock::kNone);
- DCHECK(end->control() == BasicBlock::kNone);
+ DCHECK_NE(BasicBlock::kNone, block->control());
+ DCHECK_EQ(BasicBlock::kNone, end->control());
end->set_control(block->control());
block->set_control(BasicBlock::kBranch);
MoveSuccessors(block, end);
@@ -306,14 +324,14 @@ void Schedule::SetBlockForNode(BasicBlock* block, Node* node) {
std::ostream& operator<<(std::ostream& os, const Schedule& s) {
for (BasicBlock* block : *s.rpo_order()) {
- os << "--- BLOCK B" << block->id();
+ os << "--- BLOCK B" << block->rpo_number();
if (block->deferred()) os << " (deferred)";
if (block->PredecessorCount() != 0) os << " <- ";
bool comma = false;
for (BasicBlock const* predecessor : block->predecessors()) {
if (comma) os << ", ";
comma = true;
- os << "B" << predecessor->id();
+ os << "B" << predecessor->rpo_number();
}
os << " ---\n";
for (Node* node : *block) {
@@ -342,7 +360,7 @@ std::ostream& operator<<(std::ostream& os, const Schedule& s) {
for (BasicBlock const* successor : block->successors()) {
if (comma) os << ", ";
comma = true;
- os << "B" << successor->id();
+ os << "B" << successor->rpo_number();
}
os << "\n";
}
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index d4d64533b5..d940e541db 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -30,12 +30,15 @@ class BasicBlock FINAL : public ZoneObject {
public:
// Possible control nodes that can end a block.
enum Control {
- kNone, // Control not initialized yet.
- kGoto, // Goto a single successor block.
- kBranch, // Branch if true to first successor, otherwise second.
- kSwitch, // Table dispatch to one of the successor blocks.
- kReturn, // Return a value from this method.
- kThrow // Throw an exception.
+ kNone, // Control not initialized yet.
+ kGoto, // Goto a single successor block.
+ kCall, // Call with continuation as first successor, exception
+ // second.
+ kBranch, // Branch if true to first successor, otherwise second.
+ kSwitch, // Table dispatch to one of the successor blocks.
+ kDeoptimize, // Deoptimize this method.
+ kReturn, // Return a value from this method.
+ kThrow // Throw an exception.
};
class Id {
@@ -50,35 +53,6 @@ class BasicBlock FINAL : public ZoneObject {
size_t index_;
};
- static const int kInvalidRpoNumber = -1;
- class RpoNumber FINAL {
- public:
- int ToInt() const {
- DCHECK(IsValid());
- return index_;
- }
- size_t ToSize() const {
- DCHECK(IsValid());
- return static_cast<size_t>(index_);
- }
- bool IsValid() const { return index_ >= 0; }
- static RpoNumber FromInt(int index) { return RpoNumber(index); }
- static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
-
- bool IsNext(const RpoNumber other) const {
- DCHECK(IsValid());
- return other.index_ == this->index_ + 1;
- }
-
- bool operator==(RpoNumber other) const {
- return this->index_ == other.index_;
- }
-
- private:
- explicit RpoNumber(int32_t index) : index_(index) {}
- int32_t index_;
- };
-
BasicBlock(Zone* zone, Id id);
Id id() const { return id_; }
@@ -159,7 +133,6 @@ class BasicBlock FINAL : public ZoneObject {
int32_t loop_number() const { return loop_number_; }
void set_loop_number(int32_t loop_number) { loop_number_ = loop_number; }
- RpoNumber GetRpoNumber() const { return RpoNumber::FromInt(rpo_number_); }
int32_t rpo_number() const { return rpo_number_; }
void set_rpo_number(int32_t rpo_number);
@@ -197,7 +170,6 @@ class BasicBlock FINAL : public ZoneObject {
std::ostream& operator<<(std::ostream&, const BasicBlock::Control&);
std::ostream& operator<<(std::ostream&, const BasicBlock::Id&);
-std::ostream& operator<<(std::ostream&, const BasicBlock::RpoNumber&);
// A schedule represents the result of assigning nodes to basic blocks
@@ -233,6 +205,10 @@ class Schedule FINAL : public ZoneObject {
// BasicBlock building: add a goto to the end of {block}.
void AddGoto(BasicBlock* block, BasicBlock* succ);
+ // BasicBlock building: add a call at the end of {block}.
+ void AddCall(BasicBlock* block, Node* call, BasicBlock* success_block,
+ BasicBlock* exception_block);
+
// BasicBlock building: add a branch at the end of {block}.
void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
BasicBlock* fblock);
@@ -241,6 +217,9 @@ class Schedule FINAL : public ZoneObject {
void AddSwitch(BasicBlock* block, Node* sw, BasicBlock** succ_blocks,
size_t succ_count);
+ // BasicBlock building: add a deoptimize at the end of {block}.
+ void AddDeoptimize(BasicBlock* block, Node* input);
+
// BasicBlock building: add a return at the end of {block}.
void AddReturn(BasicBlock* block, Node* input);
diff --git a/deps/v8/src/compiler/scheduler.cc b/deps/v8/src/compiler/scheduler.cc
index 6e105e3713..1185cacab7 100644
--- a/deps/v8/src/compiler/scheduler.cc
+++ b/deps/v8/src/compiler/scheduler.cc
@@ -4,6 +4,8 @@
#include "src/compiler/scheduler.h"
+#include <iomanip>
+
#include "src/bit-vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
@@ -17,15 +19,10 @@ namespace v8 {
namespace internal {
namespace compiler {
-static inline void Trace(const char* msg, ...) {
- if (FLAG_trace_turbo_scheduler) {
- va_list arguments;
- va_start(arguments, msg);
- base::OS::VPrint(msg, arguments);
- va_end(arguments);
- }
-}
-
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
+ } while (false)
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
: zone_(zone),
@@ -64,7 +61,6 @@ Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
- DCHECK(node->id() < static_cast<int>(node_data_.size()));
return &node_data_[node->id()];
}
@@ -172,7 +168,7 @@ void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
++(GetData(node)->unscheduled_count_);
if (FLAG_trace_turbo_scheduler) {
- Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
+ TRACE(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
GetData(node)->unscheduled_count_);
}
@@ -196,12 +192,12 @@ void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
DCHECK(GetData(node)->unscheduled_count_ > 0);
--(GetData(node)->unscheduled_count_);
if (FLAG_trace_turbo_scheduler) {
- Trace(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
+ TRACE(" Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
GetData(node)->unscheduled_count_);
}
if (GetData(node)->unscheduled_count_ == 0) {
- Trace(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
+ TRACE(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
schedule_queue_.push(node);
}
}
@@ -267,7 +263,7 @@ class CFGBuilder : public ZoneObject {
// Use control dependence equivalence to find a canonical single-entry
// single-exit region that makes up a minimal component to be scheduled.
if (IsSingleEntrySingleExitRegion(node, exit)) {
- Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
+ TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
DCHECK(!component_entry_);
component_entry_ = node;
continue;
@@ -286,7 +282,7 @@ class CFGBuilder : public ZoneObject {
}
private:
- // TODO(mstarzinger): Only for Scheduler::FuseFloatingControl.
+ friend class ScheduleLateNodeVisitor;
friend class Scheduler;
void FixNode(BasicBlock* block, Node* node) {
@@ -320,6 +316,11 @@ class CFGBuilder : public ZoneObject {
case IrOpcode::kSwitch:
BuildBlocksForSuccessors(node);
break;
+ case IrOpcode::kCall:
+ if (IsExceptionalCall(node)) {
+ BuildBlocksForSuccessors(node);
+ }
+ break;
default:
break;
}
@@ -339,6 +340,10 @@ class CFGBuilder : public ZoneObject {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectSwitch(node);
break;
+ case IrOpcode::kDeoptimize:
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectDeoptimize(node);
+ break;
case IrOpcode::kReturn:
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectReturn(node);
@@ -347,6 +352,12 @@ class CFGBuilder : public ZoneObject {
scheduler_->UpdatePlacement(node, Scheduler::kFixed);
ConnectThrow(node);
break;
+ case IrOpcode::kCall:
+ if (IsExceptionalCall(node)) {
+ scheduler_->UpdatePlacement(node, Scheduler::kFixed);
+ ConnectCall(node);
+ }
+ break;
default:
break;
}
@@ -356,7 +367,7 @@ class CFGBuilder : public ZoneObject {
BasicBlock* block = schedule_->block(node);
if (block == NULL) {
block = schedule_->NewBasicBlock();
- Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(),
+ TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
node->op()->mnemonic());
FixNode(block, node);
}
@@ -381,6 +392,31 @@ class CFGBuilder : public ZoneObject {
}
}
+ BasicBlock* FindPredecessorBlock(Node* node) {
+ BasicBlock* predecessor_block = nullptr;
+ while (true) {
+ predecessor_block = schedule_->block(node);
+ if (predecessor_block != nullptr) break;
+ node = NodeProperties::GetControlInput(node);
+ }
+ return predecessor_block;
+ }
+
+ void ConnectCall(Node* call) {
+ BasicBlock* successor_blocks[2];
+ CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));
+
+ // Consider the exception continuation to be deferred.
+ successor_blocks[1]->set_deferred(true);
+
+ Node* call_control = NodeProperties::GetControlInput(call);
+ BasicBlock* call_block = FindPredecessorBlock(call_control);
+ TraceConnect(call, call_block, successor_blocks[0]);
+ TraceConnect(call, call_block, successor_blocks[1]);
+ schedule_->AddCall(call_block, call, successor_blocks[0],
+ successor_blocks[1]);
+ }
+
void ConnectBranch(Node* branch) {
BasicBlock* successor_blocks[2];
CollectSuccessorBlocks(branch, successor_blocks,
@@ -404,10 +440,8 @@ class CFGBuilder : public ZoneObject {
schedule_->InsertBranch(component_start_, component_end_, branch,
successor_blocks[0], successor_blocks[1]);
} else {
- Node* branch_block_node = NodeProperties::GetControlInput(branch);
- BasicBlock* branch_block = schedule_->block(branch_block_node);
- DCHECK_NOT_NULL(branch_block);
-
+ Node* branch_control = NodeProperties::GetControlInput(branch);
+ BasicBlock* branch_block = FindPredecessorBlock(branch_control);
TraceConnect(branch, branch_block, successor_blocks[0]);
TraceConnect(branch, branch_block, successor_blocks[1]);
schedule_->AddBranch(branch_block, branch, successor_blocks[0],
@@ -428,14 +462,12 @@ class CFGBuilder : public ZoneObject {
schedule_->InsertSwitch(component_start_, component_end_, sw,
successor_blocks, successor_count);
} else {
- Node* sw_block_node = NodeProperties::GetControlInput(sw);
- BasicBlock* sw_block = schedule_->block(sw_block_node);
- DCHECK_NOT_NULL(sw_block);
-
+ Node* switch_control = NodeProperties::GetControlInput(sw);
+ BasicBlock* switch_block = FindPredecessorBlock(switch_control);
for (size_t index = 0; index < successor_count; ++index) {
- TraceConnect(sw, sw_block, successor_blocks[index]);
+ TraceConnect(sw, switch_block, successor_blocks[index]);
}
- schedule_->AddSwitch(sw_block, sw, successor_blocks, successor_count);
+ schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
}
}
@@ -448,22 +480,29 @@ class CFGBuilder : public ZoneObject {
// For all of the merge's control inputs, add a goto at the end to the
// merge's basic block.
for (Node* const input : merge->inputs()) {
- BasicBlock* predecessor_block = schedule_->block(input);
+ BasicBlock* predecessor_block = FindPredecessorBlock(input);
TraceConnect(merge, predecessor_block, block);
schedule_->AddGoto(predecessor_block, block);
}
}
void ConnectReturn(Node* ret) {
- Node* return_block_node = NodeProperties::GetControlInput(ret);
- BasicBlock* return_block = schedule_->block(return_block_node);
+ Node* return_control = NodeProperties::GetControlInput(ret);
+ BasicBlock* return_block = FindPredecessorBlock(return_control);
TraceConnect(ret, return_block, NULL);
schedule_->AddReturn(return_block, ret);
}
+ void ConnectDeoptimize(Node* deopt) {
+ Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
+ BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
+ TraceConnect(deopt, deoptimize_block, NULL);
+ schedule_->AddDeoptimize(deoptimize_block, deopt);
+ }
+
void ConnectThrow(Node* thr) {
- Node* throw_block_node = NodeProperties::GetControlInput(thr);
- BasicBlock* throw_block = schedule_->block(throw_block_node);
+ Node* throw_control = NodeProperties::GetControlInput(thr);
+ BasicBlock* throw_block = FindPredecessorBlock(throw_control);
TraceConnect(thr, throw_block, NULL);
schedule_->AddThrow(throw_block, thr);
}
@@ -471,12 +510,19 @@ class CFGBuilder : public ZoneObject {
void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
DCHECK_NOT_NULL(block);
if (succ == NULL) {
- Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
- block->id().ToInt());
+ TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
+ node->op()->mnemonic(), block->id().ToInt());
} else {
- Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
- block->id().ToInt(), succ->id().ToInt());
+ TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
+ node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
+ }
+ }
+
+ bool IsExceptionalCall(Node* node) {
+ for (Node* const use : node->uses()) {
+ if (use->opcode() == IrOpcode::kIfException) return true;
}
+ return false;
}
bool IsFinalMerge(Node* node) {
@@ -509,7 +555,7 @@ class CFGBuilder : public ZoneObject {
void Scheduler::BuildCFG() {
- Trace("--- CREATING CFG -------------------------------------------\n");
+ TRACE("--- CREATING CFG -------------------------------------------\n");
// Instantiate a new control equivalence algorithm for the graph.
equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
@@ -549,7 +595,8 @@ class SpecialRPONumberer : public ZoneObject {
loops_(zone),
backedges_(zone),
stack_(zone),
- previous_block_count_(0) {}
+ previous_block_count_(0),
+ empty_(0, zone) {}
// Computes the special reverse-post-order for the main control flow graph,
// that is for the graph spanned between the schedule's start and end blocks.
@@ -586,6 +633,14 @@ class SpecialRPONumberer : public ZoneObject {
#endif
}
+ const ZoneList<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
+ if (HasLoopNumber(block)) {
+ LoopInfo const& loop = loops_[GetLoopNumber(block)];
+ if (loop.outgoing) return *loop.outgoing;
+ }
+ return empty_;
+ }
+
private:
typedef std::pair<BasicBlock*, size_t> Backedge;
@@ -828,18 +883,19 @@ class SpecialRPONumberer : public ZoneObject {
BasicBlock* end = current_loop->end;
current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
current_header = current_loop->header;
- Trace("B%d is a loop header, increment loop depth to %d\n",
+ TRACE("id:%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
}
current->set_loop_depth(loop_depth);
if (current->loop_header() == NULL) {
- Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
+ TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
current->loop_depth());
} else {
- Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(),
- current->loop_header()->id().ToInt(), current->loop_depth());
+ TRACE("id:%d has loop header id:%d, (depth == %d)\n",
+ current->id().ToInt(), current->loop_header()->id().ToInt(),
+ current->loop_depth());
}
}
}
@@ -905,30 +961,27 @@ class SpecialRPONumberer : public ZoneObject {
os << " (";
for (size_t i = 0; i < loops_.size(); i++) {
if (i > 0) os << " ";
- os << "B" << loops_[i].header->id();
+ os << "id:" << loops_[i].header->id();
}
os << ")";
}
os << ":\n";
for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
- BasicBlock::Id bid = block->id();
- // TODO(jarin,svenpanne): Add formatting here once we have support for
- // that in streams (we want an equivalent of PrintF("%5d:", x) here).
- os << " " << block->rpo_number() << ":";
+ os << std::setw(5) << "B" << block->rpo_number() << ":";
for (size_t i = 0; i < loops_.size(); i++) {
bool range = loops_[i].header->LoopContains(block);
bool membership = loops_[i].header != block && range;
os << (membership ? " |" : " ");
os << (range ? "x" : " ");
}
- os << " B" << bid << ": ";
+ os << " id:" << block->id() << ": ";
if (block->loop_end() != NULL) {
- os << " range: [" << block->rpo_number() << ", "
+ os << " range: [B" << block->rpo_number() << ", B"
<< block->loop_end()->rpo_number() << ")";
}
if (block->loop_header() != NULL) {
- os << " header: B" << block->loop_header()->id();
+ os << " header: id:" << block->loop_header()->id();
}
if (block->loop_depth() > 0) {
os << " depth: " << block->loop_depth();
@@ -969,7 +1022,7 @@ class SpecialRPONumberer : public ZoneObject {
DCHECK(block->rpo_number() == links + header->rpo_number());
links++;
block = block->rpo_next();
- DCHECK(links < static_cast<int>(2 * order->size())); // cycle?
+ DCHECK_LT(links, static_cast<int>(2 * order->size())); // cycle?
}
DCHECK(links > 0);
DCHECK(links == end->rpo_number() - header->rpo_number());
@@ -1008,6 +1061,7 @@ class SpecialRPONumberer : public ZoneObject {
ZoneVector<Backedge> backedges_;
ZoneVector<SpecialRPOStackFrame> stack_;
size_t previous_block_count_;
+ ZoneList<BasicBlock*> const empty_;
};
@@ -1021,7 +1075,7 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
void Scheduler::ComputeSpecialRPONumbering() {
- Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
+ TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");
// Compute the special reverse-post-order for basic blocks.
special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
@@ -1035,6 +1089,7 @@ void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
auto end = block->predecessors().end();
DCHECK(pred != end); // All blocks except start have predecessors.
BasicBlock* dominator = *pred;
+ bool deferred = dominator->deferred();
// For multiple predecessors, walk up the dominator tree until a common
// dominator is found. Visitation order guarantees that all predecessors
// except for backwards edges have been visited.
@@ -1042,19 +1097,19 @@ void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
// Don't examine backwards edges.
if ((*pred)->dominator_depth() < 0) continue;
dominator = BasicBlock::GetCommonDominator(dominator, *pred);
+ deferred = deferred & (*pred)->deferred();
}
block->set_dominator(dominator);
block->set_dominator_depth(dominator->dominator_depth() + 1);
- // Propagate "deferredness" of the dominator.
- if (dominator->deferred()) block->set_deferred(true);
- Trace("Block B%d's idom is B%d, depth = %d\n", block->id().ToInt(),
+ block->set_deferred(deferred | block->deferred());
+ TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
dominator->id().ToInt(), block->dominator_depth());
}
}
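
Editor's note: the hunk above also changes how the deferred flag propagates. Previously a block inherited deferredness from its immediate dominator; now a block is deferred only when every non-backedge predecessor is deferred, OR-ed with any flag the block already carries (e.g. from ConnectCall() marking an exception continuation). A simplified model of the new rule, using plain STL types as an assumption:

    #include <vector>

    struct Block {
      bool deferred = false;
      std::vector<Block*> predecessors;  // backedges already filtered out
    };

    // A block is deferred if all of its predecessors are (AND-fold), or if
    // it was already marked deferred itself.
    void PropagateDeferredModel(Block* block) {
      bool deferred = !block->predecessors.empty();
      for (Block* pred : block->predecessors) {
        deferred = deferred && pred->deferred;
      }
      block->deferred = deferred || block->deferred;
    }
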
void Scheduler::GenerateImmediateDominatorTree() {
- Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
+ TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
// Seed start block to be the first dominator.
schedule_->start()->set_dominator_depth(0);
@@ -1079,7 +1134,7 @@ class PrepareUsesVisitor {
scheduler_->schedule_root_nodes_.push_back(node);
if (!schedule_->IsScheduled(node)) {
// Make sure root nodes are scheduled in their respective blocks.
- Trace("Scheduling fixed position node #%d:%s\n", node->id(),
+ TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
node->op()->mnemonic());
IrOpcode::Value opcode = node->opcode();
BasicBlock* block =
@@ -1109,7 +1164,7 @@ class PrepareUsesVisitor {
void Scheduler::PrepareUses() {
- Trace("--- PREPARE USES -------------------------------------------\n");
+ TRACE("--- PREPARE USES -------------------------------------------\n");
// Count the uses of every node, which is used to ensure that all of a
// node's uses are scheduled before the node itself.
@@ -1166,7 +1221,7 @@ class ScheduleEarlyNodeVisitor {
// Fixed nodes already know their schedule early position.
if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
data->minimum_block_ = schedule_->block(node);
- Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+ TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
node->id(), node->op()->mnemonic(),
data->minimum_block_->id().ToInt(),
data->minimum_block_->dominator_depth());
@@ -1204,7 +1259,7 @@ class ScheduleEarlyNodeVisitor {
if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
data->minimum_block_ = block;
queue_.push(node);
- Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+ TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
node->id(), node->op()->mnemonic(),
data->minimum_block_->id().ToInt(),
data->minimum_block_->dominator_depth());
@@ -1225,13 +1280,13 @@ class ScheduleEarlyNodeVisitor {
void Scheduler::ScheduleEarly() {
- Trace("--- SCHEDULE EARLY -----------------------------------------\n");
+ TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
- Trace("roots: ");
+ TRACE("roots: ");
for (Node* node : schedule_root_nodes_) {
- Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+ TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
}
- Trace("\n");
+ TRACE("\n");
}
// Compute the minimum block for each node thereby determining the earliest
@@ -1293,29 +1348,30 @@ class ScheduleLateNodeVisitor {
// Determine the dominating block for all of the uses of this node. It is
// the latest block that this node can be scheduled in.
- Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
+ TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
BasicBlock* block = GetCommonDominatorOfUses(node);
DCHECK_NOT_NULL(block);
// The schedule early block dominates the schedule late block.
BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
- Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
- node->id(), node->op()->mnemonic(), block->id().ToInt(),
- block->loop_depth(), min_block->id().ToInt());
+ TRACE(
+ "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
+ node->id(), node->op()->mnemonic(), block->id().ToInt(),
+ block->loop_depth(), min_block->id().ToInt());
// Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    // into enclosing loop pre-headers until they would precede their schedule
// early position.
- BasicBlock* hoist_block = GetPreHeader(block);
+ BasicBlock* hoist_block = GetHoistBlock(block);
if (hoist_block &&
hoist_block->dominator_depth() >= min_block->dominator_depth()) {
do {
- Trace(" hoisting #%d:%s to block B%d\n", node->id(),
+ TRACE(" hoisting #%d:%s to block id:%d\n", node->id(),
node->op()->mnemonic(), hoist_block->id().ToInt());
DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
block = hoist_block;
- hoist_block = GetPreHeader(hoist_block);
+ hoist_block = GetHoistBlock(hoist_block);
} while (hoist_block &&
hoist_block->dominator_depth() >= min_block->dominator_depth());
} else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
@@ -1324,7 +1380,7 @@ class ScheduleLateNodeVisitor {
}
// Schedule the node or a floating control structure.
- if (NodeProperties::IsControl(node)) {
+ if (IrOpcode::IsMergeOpcode(node->opcode())) {
ScheduleFloatingControl(block, node);
} else {
ScheduleNode(block, node);
@@ -1345,6 +1401,8 @@ class ScheduleLateNodeVisitor {
BasicBlock* SplitNode(BasicBlock* block, Node* node) {
// For now, we limit splitting to pure nodes.
if (!node->op()->HasProperty(Operator::kPure)) return block;
+ // TODO(titzer): fix the special case of splitting of projections.
+ if (node->opcode() == IrOpcode::kProjection) return block;
// The {block} is common dominator of all uses of {node}, so we cannot
// split anything unless the {block} has at least two successors.
@@ -1361,7 +1419,7 @@ class ScheduleLateNodeVisitor {
BasicBlock* use_block = GetBlockForUse(edge);
if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
if (use_block == block) {
- Trace(" not splitting #%d:%s, it is used in B%d\n", node->id(),
+ TRACE(" not splitting #%d:%s, it is used in id:%d\n", node->id(),
node->op()->mnemonic(), block->id().ToInt());
marking_queue_.clear();
return block;
@@ -1389,7 +1447,7 @@ class ScheduleLateNodeVisitor {
// {block} to the end contain at least one use of {node}, and hence there's
// no point in splitting the {node} in this case.
if (marked_[block->id().ToSize()]) {
- Trace(" not splitting #%d:%s, its common dominator B%d is perfect\n",
+ TRACE(" not splitting #%d:%s, its common dominator id:%d is perfect\n",
node->id(), node->op()->mnemonic(), block->id().ToInt());
return block;
}
@@ -1411,12 +1469,12 @@ class ScheduleLateNodeVisitor {
// Place the {node} at {use_block}.
block = use_block;
use_node = node;
- Trace(" pushing #%d:%s down to B%d\n", node->id(),
+ TRACE(" pushing #%d:%s down to id:%d\n", node->id(),
node->op()->mnemonic(), block->id().ToInt());
} else {
// Place a copy of {node} at {use_block}.
use_node = CloneNode(node);
- Trace(" cloning #%d:%s for B%d\n", use_node->id(),
+ TRACE(" cloning #%d:%s for id:%d\n", use_node->id(),
use_node->op()->mnemonic(), use_block->id().ToInt());
scheduler_->schedule_queue_.push(use_node);
}
@@ -1426,14 +1484,23 @@ class ScheduleLateNodeVisitor {
return block;
}
- BasicBlock* GetPreHeader(BasicBlock* block) {
- if (block->IsLoopHeader()) {
- return block->dominator();
- } else if (block->loop_header() != NULL) {
- return block->loop_header()->dominator();
- } else {
- return NULL;
+ BasicBlock* GetHoistBlock(BasicBlock* block) {
+ if (block->IsLoopHeader()) return block->dominator();
+ // We have to check to make sure that the {block} dominates all
+ // of the outgoing blocks. If it doesn't, then there is a path
+ // out of the loop which does not execute this {block}, so we
+ // can't hoist operations from this {block} out of the loop, as
+ // that would introduce additional computations.
+ if (BasicBlock* header_block = block->loop_header()) {
+ for (BasicBlock* outgoing_block :
+ scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
+ if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
+ return nullptr;
+ }
+ }
+ return header_block->dominator();
}
+ return nullptr;
}
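
Editor's note: GetHoistBlock() strengthens the old GetPreHeader(). Before returning the loop pre-header it verifies that {block} dominates every outgoing block of the loop; otherwise some path leaves the loop without executing {block}, and hoisting would add work to that path. A minimal model of the check, with a naive dominator-chain walk standing in for the real dominator tree:

    #include <vector>

    struct Block {
      Block* dominator = nullptr;  // immediate dominator; nullptr at the root
    };

    bool Dominates(const Block* a, const Block* b) {
      for (const Block* d = b; d != nullptr; d = d->dominator) {
        if (d == a) return true;
      }
      return false;
    }

    // Returns the hoist target (the loop pre-header), or nullptr if some
    // loop exit is not dominated by {block}, mirroring the logic above.
    Block* GetHoistBlockModel(Block* block, Block* loop_header,
                              const std::vector<Block*>& outgoing_blocks) {
      for (const Block* out : outgoing_blocks) {
        if (!Dominates(block, out)) return nullptr;
      }
      return loop_header->dominator;
    }
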
BasicBlock* GetCommonDominatorOfUses(Node* node) {
@@ -1448,32 +1515,43 @@ class ScheduleLateNodeVisitor {
return block;
}
+ BasicBlock* FindPredecessorBlock(Node* node) {
+ return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
+ }
+
BasicBlock* GetBlockForUse(Edge edge) {
Node* use = edge.from();
- IrOpcode::Value opcode = use->opcode();
- if (IrOpcode::IsPhiOpcode(opcode)) {
+ if (IrOpcode::IsPhiOpcode(use->opcode())) {
// If the use is from a coupled (i.e. floating) phi, compute the common
// dominator of its uses. This will not recurse more than one level.
if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
- Trace(" inspecting uses of coupled #%d:%s\n", use->id(),
+ TRACE(" inspecting uses of coupled #%d:%s\n", use->id(),
use->op()->mnemonic());
DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
return GetCommonDominatorOfUses(use);
}
- // If the use is from a fixed (i.e. non-floating) phi, use the block
- // of the corresponding control input to the merge.
+ // If the use is from a fixed (i.e. non-floating) phi, we use the
+ // predecessor block of the corresponding control input to the merge.
if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
- Trace(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
+ TRACE(" input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
use->op()->mnemonic());
Node* merge = NodeProperties::GetControlInput(use, 0);
- opcode = merge->opcode();
- DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
- use = NodeProperties::GetControlInput(merge, edge.index());
+ DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
+ Node* input = NodeProperties::GetControlInput(merge, edge.index());
+ return FindPredecessorBlock(input);
+ }
+ } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
+ // If the use is from a fixed (i.e. non-floating) merge, we use the
+ // predecessor block of the current input to the merge.
+ if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+ TRACE(" input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
+ use->op()->mnemonic());
+ return FindPredecessorBlock(edge.to());
}
}
BasicBlock* result = schedule_->block(use);
if (result == NULL) return NULL;
- Trace(" must dominate use #%d:%s in B%d\n", use->id(),
+ TRACE(" must dominate use #%d:%s in id:%d\n", use->id(),
use->op()->mnemonic(), result->id().ToInt());
return result;
}
@@ -1497,6 +1575,8 @@ class ScheduleLateNodeVisitor {
inputs[index] = input;
}
Node* copy = scheduler_->graph_->NewNode(node->op(), input_count, inputs);
+    TRACE("clone #%d:%s -> #%d\n", node->id(), node->op()->mnemonic(),
+          copy->id());
scheduler_->node_data_.resize(copy->id() + 1,
scheduler_->DefaultSchedulerData());
scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
@@ -1511,13 +1591,13 @@ class ScheduleLateNodeVisitor {
void Scheduler::ScheduleLate() {
- Trace("--- SCHEDULE LATE ------------------------------------------\n");
+ TRACE("--- SCHEDULE LATE ------------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
- Trace("roots: ");
+ TRACE("roots: ");
for (Node* node : schedule_root_nodes_) {
- Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+ TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
}
- Trace("\n");
+ TRACE("\n");
}
// Schedule: Places nodes in dominator block of all their uses.
@@ -1531,7 +1611,7 @@ void Scheduler::ScheduleLate() {
void Scheduler::SealFinalSchedule() {
- Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");
+ TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");
// Serialize the assembly order and reverse-post-order numbering.
special_rpo_->SerializeRPOIntoSchedule();
@@ -1553,7 +1633,7 @@ void Scheduler::SealFinalSchedule() {
void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
- Trace("--- FUSE FLOATING CONTROL ----------------------------------\n");
+ TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
OFStream os(stdout);
os << "Schedule before control flow fusion:\n" << *schedule_;
@@ -1582,11 +1662,11 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
}
}
if (FLAG_trace_turbo_scheduler) {
- Trace("propagation roots: ");
+ TRACE("propagation roots: ");
for (Node* node : propagation_roots) {
- Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+ TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
}
- Trace("\n");
+ TRACE("\n");
}
ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
schedule_early_visitor.Run(&propagation_roots);
@@ -1604,7 +1684,7 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
- Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(),
+ TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
to->id().ToInt());
NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
for (Node* const node : *nodes) {
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 59dd741ee4..d216009e4e 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -25,8 +25,10 @@ namespace internal {
namespace compiler {
// Macro for outputting trace information from representation inference.
-#define TRACE(x) \
- if (FLAG_trace_representation) PrintF x
+#define TRACE(...) \
+ do { \
+ if (FLAG_trace_representation) PrintF(__VA_ARGS__); \
+ } while (false)
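
Editor's note: the macro rewrite drops the old double-parenthesis calling convention, TRACE((...)), in favor of a variadic macro; the do { ... } while (false) wrapper keeps the expansion a single statement so it nests safely under unbraced if/else. A self-contained illustration of the pattern (the flag name is a stand-in):

    #include <cstdio>

    static bool trace_enabled = false;  // stands in for FLAG_trace_representation

    #define TRACE_EXAMPLE(...)                        \
      do {                                            \
        if (trace_enabled) std::printf(__VA_ARGS__);  \
      } while (false)

    void Demo(bool condition) {
      // Expands to a single statement, so the else binds as expected:
      if (condition)
        TRACE_EXAMPLE("visit #%d: %s\n", 42, "NumberAdd");
      else
        TRACE_EXAMPLE("skipped\n");
    }
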
// Representation selection and lowering of {Simplified} operators to machine
 // operators are intertwined. We use a fixpoint calculation to compute both the
@@ -85,7 +87,7 @@ class RepresentationSelector {
void Run(SimplifiedLowering* lowering) {
// Run propagation phase to a fixpoint.
- TRACE(("--{Propagation phase}--\n"));
+ TRACE("--{Propagation phase}--\n");
phase_ = PROPAGATE;
Enqueue(jsgraph_->graph()->end());
// Process nodes from the queue until it is empty.
@@ -94,20 +96,20 @@ class RepresentationSelector {
NodeInfo* info = GetInfo(node);
queue_.pop();
info->queued = false;
- TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+ TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
VisitNode(node, info->use, NULL);
- TRACE((" ==> output "));
+ TRACE(" ==> output ");
PrintInfo(info->output);
- TRACE(("\n"));
+ TRACE("\n");
}
// Run lowering and change insertion phase.
- TRACE(("--{Simplified lowering phase}--\n"));
+ TRACE("--{Simplified lowering phase}--\n");
phase_ = LOWER;
// Process nodes from the collected {nodes_} vector.
for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
Node* node = *i;
- TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+ TRACE(" visit #%d: %s\n", node->id(), node->op()->mnemonic());
// Reuse {VisitNode()} so the representation rules are in one place.
if (FLAG_turbo_source_positions) {
SourcePositionTable::Scope scope(
@@ -124,6 +126,11 @@ class RepresentationSelector {
Node* node = *i;
Node* replacement = *(++i);
node->ReplaceUses(replacement);
+ // We also need to replace the node in the rest of the vector.
+ for (NodeVector::iterator j = i + 1; j != replacements_.end(); ++j) {
+ ++j;
+ if (*j == node) *j = replacement;
+ }
}
}
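
Editor's note: this fix addresses chained replacements. replacements_ is a flat vector of (node, replacement) pairs; once a node has been replaced, later pairs whose replacement slot still names it must be patched, or uses would be rewired to a dead node. The inner loop advances the iterator twice per pass so it only examines replacement slots, never key slots. The same logic over plain indices (illustrative types only):

    #include <vector>

    using NodeId = int;

    // replacements[2k] is the node to replace, replacements[2k+1] is its
    // replacement.
    void ApplyReplacementsModel(std::vector<NodeId>* replacements) {
      std::vector<NodeId>& r = *replacements;
      for (size_t i = 0; i + 1 < r.size(); i += 2) {
        NodeId node = r[i];
        NodeId replacement = r[i + 1];
        // ... node->ReplaceUses(replacement) happens here in the real code ...
        // Patch replacement slots of the remaining pairs (i+3, i+5, ...).
        for (size_t j = i + 3; j < r.size(); j += 2) {
          if (r[j] == node) r[j] = replacement;
        }
      }
    }
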
@@ -138,21 +145,21 @@ class RepresentationSelector {
info->queued = true;
nodes_.push_back(node);
queue_.push(node);
- TRACE((" initial: "));
+ TRACE(" initial: ");
info->use |= use;
PrintUseInfo(node);
return;
}
- TRACE((" queue?: "));
+ TRACE(" queue?: ");
PrintUseInfo(node);
if ((info->use & use) != use) {
// New usage information for the node is available.
if (!info->queued) {
queue_.push(node);
info->queued = true;
- TRACE((" added: "));
+ TRACE(" added: ");
} else {
- TRACE((" inqueue: "));
+ TRACE(" inqueue: ");
}
info->use |= use;
PrintUseInfo(node);
@@ -190,14 +197,14 @@ class RepresentationSelector {
MachineTypeUnion output = GetInfo(input)->output;
if ((output & (kRepBit | kRepWord8 | kRepWord16 | kRepWord32)) == 0) {
// Output representation doesn't match usage.
- TRACE((" truncate-to-int32: #%d:%s(@%d #%d:%s) ", node->id(),
- node->op()->mnemonic(), index, input->id(),
- input->op()->mnemonic()));
- TRACE((" from "));
+ TRACE(" truncate-to-int32: #%d:%s(@%d #%d:%s) ", node->id(),
+ node->op()->mnemonic(), index, input->id(),
+ input->op()->mnemonic());
+ TRACE(" from ");
PrintInfo(output);
- TRACE((" to "));
+ TRACE(" to ");
PrintInfo(use);
- TRACE(("\n"));
+ TRACE("\n");
Node* n = changer_->GetTruncatedWord32For(input, output);
node->ReplaceInput(index, n);
}
@@ -215,14 +222,14 @@ class RepresentationSelector {
MachineTypeUnion output = GetInfo(input)->output;
if ((output & kRepMask & use) == 0) {
// Output representation doesn't match usage.
- TRACE((" change: #%d:%s(@%d #%d:%s) ", node->id(),
- node->op()->mnemonic(), index, input->id(),
- input->op()->mnemonic()));
- TRACE((" from "));
+ TRACE(" change: #%d:%s(@%d #%d:%s) ", node->id(),
+ node->op()->mnemonic(), index, input->id(),
+ input->op()->mnemonic());
+ TRACE(" from ");
PrintInfo(output);
- TRACE((" to "));
+ TRACE(" to ");
PrintInfo(use);
- TRACE(("\n"));
+ TRACE("\n");
Node* n = changer_->GetRepresentationFor(input, output, use);
node->ReplaceInput(index, n);
}
@@ -243,38 +250,37 @@ class RepresentationSelector {
}
// The default, most general visitation case. For {node}, process all value,
- // context, effect, and control inputs, assuming that value inputs should have
- // {kRepTagged} representation and can observe all output values {kTypeAny}.
+ // context, frame state, effect, and control inputs, assuming that value
+ // inputs should have {kRepTagged} representation and can observe all output
+ // values {kTypeAny}.
void VisitInputs(Node* node) {
- auto i = node->input_edges().begin();
- for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
- ProcessInput(node, (*i).index(), kMachAnyTagged); // Value inputs
- }
- for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
- ++i, j--) {
- ProcessInput(node, (*i).index(), kMachAnyTagged); // Context inputs
- }
- for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
- ++i, j--) {
- Enqueue((*i).to()); // FrameState inputs: just visit
+ int tagged_count = node->op()->ValueInputCount() +
+ OperatorProperties::GetContextInputCount(node->op());
+ // Visit value and context inputs as tagged.
+ for (int i = 0; i < tagged_count; i++) {
+ ProcessInput(node, i, kMachAnyTagged);
}
- for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
- Enqueue((*i).to()); // Effect inputs: just visit
+ // Only enqueue other inputs (framestates, effects, control).
+ for (int i = tagged_count; i < node->InputCount(); i++) {
+ Enqueue(node->InputAt(i));
}
- for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
- Enqueue((*i).to()); // Control inputs: just visit
- }
- DCHECK(i == node->input_edges().end());
+ // Assume the output is tagged.
SetOutput(node, kMachAnyTagged);
}
+ // Helper for binops of the R x L -> O variety.
+ void VisitBinop(Node* node, MachineTypeUnion left_use,
+ MachineTypeUnion right_use, MachineTypeUnion output) {
+ DCHECK_EQ(2, node->InputCount());
+ ProcessInput(node, 0, left_use);
+ ProcessInput(node, 1, right_use);
+ SetOutput(node, output);
+ }
+
// Helper for binops of the I x I -> O variety.
void VisitBinop(Node* node, MachineTypeUnion input_use,
MachineTypeUnion output) {
- DCHECK_EQ(2, node->InputCount());
- ProcessInput(node, 0, input_use);
- ProcessInput(node, 1, input_use);
- SetOutput(node, output);
+ VisitBinop(node, input_use, input_use, output);
}
// Helper for unops of the I -> O variety.
@@ -324,7 +330,8 @@ class RepresentationSelector {
} else if (upper->Is(Type::Signed32()) || upper->Is(Type::Unsigned32())) {
// multiple uses, but we are within 32 bits range => pick kRepWord32.
return kRepWord32;
- } else if ((use & kRepMask) == kRepWord32 ||
+ } else if (((use & kRepMask) == kRepWord32 &&
+ !CanObserveNonWord32(use)) ||
(use & kTypeMask) == kTypeInt32 ||
(use & kTypeMask) == kTypeUint32) {
// We only use 32 bits or we use the result consistently.
@@ -393,21 +400,36 @@ class RepresentationSelector {
}
// Convert inputs to the output representation of this phi.
- for (Edge const edge : node->input_edges()) {
- // TODO(titzer): it'd be nice to have distinguished edge kinds here.
- ProcessInput(node, edge.index(), values > 0 ? output_type : 0);
- values--;
+ for (int i = 0; i < node->InputCount(); i++) {
+ ProcessInput(node, i, i < values ? output_type : 0);
}
} else {
// Propagate {use} of the phi to value inputs, and 0 to control.
MachineType use_type =
static_cast<MachineType>((use & kTypeMask) | output);
- for (Edge const edge : node->input_edges()) {
- // TODO(titzer): it'd be nice to have distinguished edge kinds here.
- ProcessInput(node, edge.index(), values > 0 ? use_type : 0);
- values--;
+ for (int i = 0; i < node->InputCount(); i++) {
+ ProcessInput(node, i, i < values ? use_type : 0);
+ }
+ }
+ }
+
+ void VisitStateValues(Node* node) {
+ if (phase_ == PROPAGATE) {
+ for (int i = 0; i < node->InputCount(); i++) {
+ Enqueue(node->InputAt(i), kTypeAny);
+ }
+ } else {
+ Zone* zone = jsgraph_->zone();
+ ZoneVector<MachineType>* types =
+ new (zone->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(node->InputCount(), zone);
+ for (int i = 0; i < node->InputCount(); i++) {
+ MachineTypeUnion input_type = GetInfo(node->InputAt(i))->output;
+ (*types)[i] = static_cast<MachineType>(input_type);
}
+ node->set_op(jsgraph_->common()->TypedStateValues(types));
}
+ SetOutput(node, kMachAnyTagged);
}
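
Editor's note: VisitStateValues() is new. During propagation it only enqueues the inputs; during lowering it snapshots the output representation computed for each input and rewrites the operator to TypedStateValues carrying that vector, so later phases know how every slot is represented. The type-collection step in isolation, with STL containers standing in for Zone allocation:

    #include <vector>

    enum MachineTypeModel { kTagged, kInt32, kFloat64 };

    struct NodeModel {
      std::vector<NodeModel*> inputs;
      MachineTypeModel output = kTagged;  // set by representation inference
    };

    std::vector<MachineTypeModel> CollectStateValueTypes(
        const NodeModel& state_values) {
      std::vector<MachineTypeModel> types;
      types.reserve(state_values.inputs.size());
      for (const NodeModel* input : state_values.inputs) {
        types.push_back(input->output);  // representation chosen per input
      }
      return types;  // attached via common()->TypedStateValues(types) above
    }
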
const Operator* Int32Op(Node* node) {
@@ -426,18 +448,8 @@ class RepresentationSelector {
return BothInputsAre(node, Type::Signed32()) && !CanObserveNonInt32(use);
}
- bool IsSafeIntAdditiveOperand(Node* node) {
- Type* type = NodeProperties::GetBounds(node).upper;
- // TODO(jarin): Unfortunately, bitset types are not subtypes of larger
- // range types, so we have to explicitly check for Integral32 here
- // (in addition to the safe integer range). Once we fix subtyping for
- // ranges, we should simplify this.
- return type->Is(safe_int_additive_range_) || type->Is(Type::Integral32());
- }
-
bool CanLowerToInt32AdditiveBinop(Node* node, MachineTypeUnion use) {
- return IsSafeIntAdditiveOperand(node->InputAt(0)) &&
- IsSafeIntAdditiveOperand(node->InputAt(1)) &&
+ return BothInputsAre(node, safe_int_additive_range_) &&
!CanObserveNonInt32(use);
}
@@ -446,11 +458,14 @@ class RepresentationSelector {
}
bool CanLowerToUint32AdditiveBinop(Node* node, MachineTypeUnion use) {
- return IsSafeIntAdditiveOperand(node->InputAt(0)) &&
- IsSafeIntAdditiveOperand(node->InputAt(1)) &&
+ return BothInputsAre(node, safe_int_additive_range_) &&
!CanObserveNonUint32(use);
}
+ bool CanObserveNonWord32(MachineTypeUnion use) {
+ return (use & ~(kTypeUint32 | kTypeInt32)) != 0;
+ }
+
bool CanObserveNonInt32(MachineTypeUnion use) {
return (use & (kTypeUint32 | kTypeNumber | kTypeAny)) != 0;
}
@@ -531,24 +546,6 @@ class RepresentationSelector {
//------------------------------------------------------------------
// Simplified operators.
//------------------------------------------------------------------
- case IrOpcode::kAnyToBoolean: {
- VisitUnop(node, kMachAnyTagged, kTypeBool | kRepTagged);
- if (lower()) {
- // AnyToBoolean(x) => Call(ToBooleanStub, x, no-context)
- Operator::Properties properties = node->op()->properties();
- Callable callable = CodeFactory::ToBoolean(
- jsgraph_->isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
- CallDescriptor::Flags flags = CallDescriptor::kPatchableCallSite;
- CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- jsgraph_->isolate(), jsgraph_->zone(), callable.descriptor(), 0,
- flags, properties);
- node->set_op(jsgraph_->common()->Call(desc));
- node->InsertInput(jsgraph_->zone(), 0,
- jsgraph_->HeapConstant(callable.code()));
- node->AppendInput(jsgraph_->zone(), jsgraph_->NoContextConstant());
- }
- break;
- }
case IrOpcode::kBooleanNot: {
if (lower()) {
MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
@@ -900,7 +897,7 @@ class RepresentationSelector {
MachineTypeUnion tBase = kRepTagged | kMachPtr;
LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kMachInt32); // index
+ ProcessInput(node, 1, kMachIntPtr); // index
ProcessRemainingInputs(node, 2);
SetOutput(node, rep);
break;
@@ -910,7 +907,7 @@ class RepresentationSelector {
MachineTypeUnion tBase = kRepTagged | kMachPtr;
StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
ProcessInput(node, 0, tBase); // pointer or object
- ProcessInput(node, 1, kMachInt32); // index
+ ProcessInput(node, 1, kMachIntPtr); // index
ProcessInput(node, 2, rep.machine_type());
ProcessRemainingInputs(node, 3);
SetOutput(node, 0);
@@ -931,6 +928,9 @@ class RepresentationSelector {
case IrOpcode::kWord32Equal:
return VisitBinop(node, kRepWord32, kRepBit);
+ case IrOpcode::kWord32Clz:
+ return VisitUnop(node, kMachUint32, kMachUint32);
+
case IrOpcode::kInt32Add:
case IrOpcode::kInt32Sub:
case IrOpcode::kInt32Mul:
@@ -1012,10 +1012,10 @@ class RepresentationSelector {
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
+ case IrOpcode::kFloat64Min:
return VisitFloat64Binop(node);
case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
return VisitUnop(node, kMachFloat64, kMachFloat64);
@@ -1023,13 +1023,16 @@ class RepresentationSelector {
case IrOpcode::kFloat64LessThan:
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64Cmp(node);
+ case IrOpcode::kFloat64ExtractLowWord32:
+ case IrOpcode::kFloat64ExtractHighWord32:
+ return VisitUnop(node, kMachFloat64, kMachInt32);
+ case IrOpcode::kFloat64InsertLowWord32:
+ case IrOpcode::kFloat64InsertHighWord32:
+ return VisitBinop(node, kMachFloat64, kMachInt32, kMachFloat64);
case IrOpcode::kLoadStackPointer:
return VisitLeaf(node, kMachPtr);
case IrOpcode::kStateValues:
- for (int i = 0; i < node->InputCount(); i++) {
- ProcessInput(node, i, kTypeAny);
- }
- SetOutput(node, kMachAnyTagged);
+ VisitStateValues(node);
break;
default:
VisitInputs(node);
@@ -1038,11 +1041,10 @@ class RepresentationSelector {
}
void DeferReplacement(Node* node, Node* replacement) {
- if (FLAG_trace_representation) {
- TRACE(("defer replacement #%d:%s with #%d:%s\n", node->id(),
- node->op()->mnemonic(), replacement->id(),
- replacement->op()->mnemonic()));
- }
+ TRACE("defer replacement #%d:%s with #%d:%s\n", node->id(),
+ node->op()->mnemonic(), replacement->id(),
+ replacement->op()->mnemonic());
+
if (replacement->id() < count_ &&
GetInfo(replacement)->output == GetInfo(node)->output) {
// Replace with a previously existing node eagerly only if the type is the
@@ -1056,13 +1058,13 @@ class RepresentationSelector {
replacements_.push_back(node);
replacements_.push_back(replacement);
}
- // TODO(titzer) node->RemoveAllInputs(); // Node is now dead.
+ node->NullAllInputs(); // Node is now dead.
}
void PrintUseInfo(Node* node) {
- TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic()));
+ TRACE("#%d:%-20s ", node->id(), node->op()->mnemonic());
PrintInfo(GetUseInfo(node));
- TRACE(("\n"));
+ TRACE("\n");
}
void PrintInfo(MachineTypeUnion info) {
@@ -1135,10 +1137,15 @@ Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
}
-static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
- MachineType representation,
- Type* type) {
- // TODO(turbofan): skip write barriers for Smis, etc.
+namespace {
+
+WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+ MachineType representation,
+ Type* type) {
+ if (type->Is(Type::TaggedSigned())) {
+ // Write barriers are only for writes of heap objects.
+ return kNoWriteBarrier;
+ }
if (base_is_tagged == kTaggedBase &&
RepresentationOf(representation) == kRepTagged) {
// Write barriers are only for writes into heap objects (i.e. tagged base).
@@ -1147,6 +1154,8 @@ static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
return kNoWriteBarrier;
}
+} // namespace
+
void SimplifiedLowering::DoLoadField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
@@ -1158,8 +1167,9 @@ void SimplifiedLowering::DoLoadField(Node* node) {
void SimplifiedLowering::DoStoreField(Node* node) {
const FieldAccess& access = FieldAccessOf(node->op());
- WriteBarrierKind kind = ComputeWriteBarrierKind(
- access.base_is_tagged, access.machine_type, access.type);
+ Type* type = NodeProperties::GetBounds(node->InputAt(1)).upper;
+ WriteBarrierKind kind =
+ ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type, type);
node->set_op(
machine()->Store(StoreRepresentation(access.machine_type, kind)));
Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
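
Editor's note: DoStoreField() (and DoStoreElement() below) now derive the write barrier kind from the type of the stored value rather than from the field's declared type: a value known to be TaggedSigned (a Smi) is not a heap pointer, so the GC never needs to track the store. The decision reduced to booleans (a restatement, not the patch's code):

    enum WriteBarrierKindModel { kNoBarrier, kFullBarrier };

    WriteBarrierKindModel ComputeWriteBarrierKindModel(bool base_is_tagged,
                                                       bool rep_is_tagged,
                                                       bool value_is_smi) {
      if (value_is_smi) return kNoBarrier;  // Smis carry no heap pointer
      if (base_is_tagged && rep_is_tagged) return kFullBarrier;
      return kNoBarrier;                    // untagged base or raw value
    }
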
@@ -1264,10 +1274,11 @@ void SimplifiedLowering::DoLoadElement(Node* node) {
void SimplifiedLowering::DoStoreElement(Node* node) {
const ElementAccess& access = ElementAccessOf(node->op());
- node->set_op(machine()->Store(StoreRepresentation(
- access.machine_type,
- ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
- access.type))));
+ Type* type = NodeProperties::GetBounds(node->InputAt(2)).upper;
+ node->set_op(machine()->Store(
+ StoreRepresentation(access.machine_type,
+ ComputeWriteBarrierKind(access.base_is_tagged,
+ access.machine_type, type))));
node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
}
@@ -1292,7 +1303,7 @@ void SimplifiedLowering::DoStringAdd(Node* node) {
Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
CEntryStub stub(jsgraph()->isolate(), 1);
Runtime::FunctionId f =
- requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
+ requires_ordering ? Runtime::kStringCompareRT : Runtime::kStringEquals;
ExternalReference ref(f, jsgraph()->isolate());
Operator::Properties props = node->op()->properties();
// TODO(mstarzinger): We should call StringCompareStub here instead, once an
@@ -1311,6 +1322,7 @@ Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
Node* SimplifiedLowering::Int32Div(Node* const node) {
Int32BinopMatcher m(node);
Node* const zero = jsgraph()->Int32Constant(0);
+ Node* const minus_one = jsgraph()->Int32Constant(-1);
Node* const lhs = m.left().node();
Node* const rhs = m.right().node();
@@ -1322,20 +1334,61 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
return graph()->NewNode(machine()->Int32Div(), lhs, rhs, graph()->start());
}
- Diamond if_zero(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), rhs, zero),
- BranchHint::kFalse);
+ // General case for signed integer division.
+ //
+ // if 0 < rhs then
+ // lhs / rhs
+ // else
+ // if rhs < -1 then
+ // lhs / rhs
+ // else if rhs == 0 then
+ // 0
+ // else
+ // 0 - lhs
+ //
+ // Note: We do not use the Diamond helper class here, because it really hurts
+ // readability with nested diamonds.
+ const Operator* const merge_op = common()->Merge(2);
+ const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+
+ Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+ graph()->start());
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* true0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
+
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* false0;
+ {
+ Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
+ Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* true1 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true1);
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* false1;
+ {
+ Node* check2 = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* true2 = zero;
+
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* false2 = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
+
+ if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
+ false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
+ }
- Diamond if_minus_one(graph(), common(),
- graph()->NewNode(machine()->Word32Equal(), rhs,
- jsgraph()->Int32Constant(-1)),
- BranchHint::kFalse);
- if_minus_one.Nest(if_zero, false);
- Node* sub = graph()->NewNode(machine()->Int32Sub(), zero, lhs);
- Node* div =
- graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_minus_one.if_false);
+ if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+ false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
+ }
- return if_zero.Phi(kMachInt32, zero, if_minus_one.Phi(kMachInt32, sub, div));
+ Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+ return graph()->NewNode(phi_op, true0, false0, merge0);
}
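
Editor's note: the nested diamonds above spell out the full semantics of Int32Div for a divisor that is not a known-safe constant. A scalar reference in plain C++, inferred from the branch structure (a restatement, not code from the patch): dividing by zero yields 0, and dividing by -1 is computed as 0 - lhs in wrapping arithmetic, which sidesteps the INT_MIN / -1 case that a hardware divide would trap or overflow on.

    #include <cstdint>

    int32_t Int32DivReference(int32_t lhs, int32_t rhs) {
      if (rhs > 0 || rhs < -1) return lhs / rhs;  // plain division is safe here
      if (rhs == 0) return 0;                     // by convention: x / 0 == 0
      // rhs == -1: negate via unsigned arithmetic so that INT32_MIN wraps
      // instead of triggering undefined behavior or a hardware trap.
      return static_cast<int32_t>(0u - static_cast<uint32_t>(lhs));
    }
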
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.cc b/deps/v8/src/compiler/simplified-operator-reducer.cc
index ad48379f4c..047d251bf3 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.cc
+++ b/deps/v8/src/compiler/simplified-operator-reducer.cc
@@ -4,11 +4,9 @@
#include "src/compiler/simplified-operator-reducer.h"
-#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
#include "src/compiler/operator-properties.h"
namespace v8 {
@@ -24,8 +22,6 @@ SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
switch (node->opcode()) {
- case IrOpcode::kAnyToBoolean:
- return ReduceAnyToBoolean(node);
case IrOpcode::kBooleanNot: {
HeapObjectMatcher<HeapObject> m(node->InputAt(0));
if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
@@ -111,32 +107,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
}
-Reduction SimplifiedOperatorReducer::ReduceAnyToBoolean(Node* node) {
- Node* const input = NodeProperties::GetValueInput(node, 0);
- Type* const input_type = NodeProperties::GetBounds(input).upper;
- if (input_type->Is(Type::Boolean())) {
- // AnyToBoolean(x:boolean) => x
- return Replace(input);
- }
- if (input_type->Is(Type::OrderedNumber())) {
- // AnyToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
- Node* compare = graph()->NewNode(simplified()->NumberEqual(), input,
- jsgraph()->ZeroConstant());
- return Change(node, simplified()->BooleanNot(), compare);
- }
- if (input_type->Is(Type::String())) {
- // AnyToBoolean(x:string) => BooleanNot(NumberEqual(x.length, #0))
- FieldAccess const access = AccessBuilder::ForStringLength();
- Node* length = graph()->NewNode(simplified()->LoadField(access), input,
- graph()->start(), graph()->start());
- Node* compare = graph()->NewNode(simplified()->NumberEqual(), length,
- jsgraph()->ZeroConstant());
- return Change(node, simplified()->BooleanNot(), compare);
- }
- return NoChange();
-}
-
-
Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
Node* a) {
DCHECK_EQ(node->InputCount(), OperatorProperties::GetTotalInputCount(op));
@@ -175,11 +145,6 @@ Factory* SimplifiedOperatorReducer::factory() const {
}
-CommonOperatorBuilder* SimplifiedOperatorReducer::common() const {
- return jsgraph()->common();
-}
-
-
MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
return jsgraph()->machine();
}
diff --git a/deps/v8/src/compiler/simplified-operator-reducer.h b/deps/v8/src/compiler/simplified-operator-reducer.h
index 32a7bcc560..bc2672354b 100644
--- a/deps/v8/src/compiler/simplified-operator-reducer.h
+++ b/deps/v8/src/compiler/simplified-operator-reducer.h
@@ -18,7 +18,6 @@ class Heap;
namespace compiler {
// Forward declarations.
-class CommonOperatorBuilder;
class JSGraph;
class MachineOperatorBuilder;
@@ -30,8 +29,6 @@ class SimplifiedOperatorReducer FINAL : public Reducer {
Reduction Reduce(Node* node) FINAL;
private:
- Reduction ReduceAnyToBoolean(Node* node);
-
Reduction Change(Node* node, const Operator* op, Node* a);
Reduction ReplaceFloat64(double value);
Reduction ReplaceInt32(int32_t value);
@@ -44,7 +41,6 @@ class SimplifiedOperatorReducer FINAL : public Reducer {
Graph* graph() const;
Factory* factory() const;
JSGraph* jsgraph() const { return jsgraph_; }
- CommonOperatorBuilder* common() const;
MachineOperatorBuilder* machine() const;
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index e1e8c30aad..a900d4e36c 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -158,7 +158,6 @@ const ElementAccess& ElementAccessOf(const Operator* op) {
#define PURE_OP_LIST(V) \
- V(AnyToBoolean, Operator::kNoProperties, 1) \
V(BooleanNot, Operator::kNoProperties, 1) \
V(BooleanToNumber, Operator::kNoProperties, 1) \
V(NumberEqual, Operator::kCommutative, 2) \
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 5eed7c3291..9cd203381a 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -128,8 +128,6 @@ class SimplifiedOperatorBuilder FINAL {
public:
explicit SimplifiedOperatorBuilder(Zone* zone);
- const Operator* AnyToBoolean();
-
const Operator* BooleanNot();
const Operator* BooleanToNumber();
diff --git a/deps/v8/src/compiler/state-values-utils.cc b/deps/v8/src/compiler/state-values-utils.cc
new file mode 100644
index 0000000000..2c7d0edd7a
--- /dev/null
+++ b/deps/v8/src/compiler/state-values-utils.cc
@@ -0,0 +1,317 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/state-values-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+StateValuesCache::StateValuesCache(JSGraph* js_graph)
+ : js_graph_(js_graph),
+ hash_map_(AreKeysEqual, ZoneHashMap::kDefaultHashMapCapacity,
+ ZoneAllocationPolicy(zone())),
+ working_space_(zone()),
+ empty_state_values_(nullptr) {}
+
+
+// static
+bool StateValuesCache::AreKeysEqual(void* key1, void* key2) {
+ NodeKey* node_key1 = reinterpret_cast<NodeKey*>(key1);
+ NodeKey* node_key2 = reinterpret_cast<NodeKey*>(key2);
+
+ if (node_key1->node == nullptr) {
+ if (node_key2->node == nullptr) {
+ return AreValueKeysEqual(reinterpret_cast<StateValuesKey*>(key1),
+ reinterpret_cast<StateValuesKey*>(key2));
+ } else {
+ return IsKeysEqualToNode(reinterpret_cast<StateValuesKey*>(key1),
+ node_key2->node);
+ }
+ } else {
+ if (node_key2->node == nullptr) {
+ // If the nodes are already processed, they must be the same.
+ return IsKeysEqualToNode(reinterpret_cast<StateValuesKey*>(key2),
+ node_key1->node);
+ } else {
+ return node_key1->node == node_key2->node;
+ }
+ }
+ UNREACHABLE();
+}
+
+
+// static
+bool StateValuesCache::IsKeysEqualToNode(StateValuesKey* key, Node* node) {
+ if (key->count != static_cast<size_t>(node->InputCount())) {
+ return false;
+ }
+ for (size_t i = 0; i < key->count; i++) {
+ if (key->values[i] != node->InputAt(static_cast<int>(i))) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+// static
+bool StateValuesCache::AreValueKeysEqual(StateValuesKey* key1,
+ StateValuesKey* key2) {
+ if (key1->count != key2->count) {
+ return false;
+ }
+ for (size_t i = 0; i < key1->count; i++) {
+ if (key1->values[i] != key2->values[i]) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
+Node* StateValuesCache::GetEmptyStateValues() {
+ if (empty_state_values_ == nullptr) {
+ empty_state_values_ = graph()->NewNode(common()->StateValues(0));
+ }
+ return empty_state_values_;
+}
+
+
+NodeVector* StateValuesCache::GetWorkingSpace(size_t level) {
+ while (working_space_.size() <= level) {
+ void* space = zone()->New(sizeof(NodeVector));
+ working_space_.push_back(new (space)
+ NodeVector(kMaxInputCount, nullptr, zone()));
+ }
+ return working_space_[level];
+}
+
+namespace {
+
+int StateValuesHashKey(Node** nodes, size_t count) {
+ size_t hash = count;
+ for (size_t i = 0; i < count; i++) {
+ hash = hash * 23 + nodes[i]->id();
+ }
+ return static_cast<int>(hash & 0x7fffffff);
+}
+
+} // namespace
+
+
+Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count) {
+ StateValuesKey key(count, nodes);
+ int hash = StateValuesHashKey(nodes, count);
+ ZoneHashMap::Entry* lookup =
+ hash_map_.Lookup(&key, hash, true, ZoneAllocationPolicy(zone()));
+ DCHECK_NOT_NULL(lookup);
+ Node* node;
+ if (lookup->value == nullptr) {
+ int input_count = static_cast<int>(count);
+ node = graph()->NewNode(common()->StateValues(input_count), input_count,
+ nodes);
+ NodeKey* new_key = new (zone()->New(sizeof(NodeKey))) NodeKey(node);
+ lookup->key = new_key;
+ lookup->value = node;
+ } else {
+ node = reinterpret_cast<Node*>(lookup->value);
+ }
+ return node;
+}
+
+
+class StateValuesCache::ValueArrayIterator {
+ public:
+ ValueArrayIterator(Node** values, size_t count)
+ : values_(values), count_(count), current_(0) {}
+
+ void Advance() {
+ if (!done()) {
+ current_++;
+ }
+ }
+
+ bool done() { return current_ >= count_; }
+
+ Node* node() {
+ DCHECK(!done());
+ return values_[current_];
+ }
+
+ private:
+ Node** values_;
+ size_t count_;
+ size_t current_;
+};
+
+
+Node* StateValuesCache::BuildTree(ValueArrayIterator* it, size_t max_height) {
+ if (max_height == 0) {
+ Node* node = it->node();
+ it->Advance();
+ return node;
+ }
+ DCHECK(!it->done());
+
+ NodeVector* buffer = GetWorkingSpace(max_height);
+ size_t count = 0;
+ for (; count < kMaxInputCount; count++) {
+ if (it->done()) break;
+ (*buffer)[count] = BuildTree(it, max_height - 1);
+ }
+ if (count == 1) {
+ return (*buffer)[0];
+ } else {
+ return GetValuesNodeFromCache(&(buffer->front()), count);
+ }
+}
+
+
+Node* StateValuesCache::GetNodeForValues(Node** values, size_t count) {
+#if DEBUG
+ for (size_t i = 0; i < count; i++) {
+ DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
+ DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+ }
+#endif
+ if (count == 0) {
+ return GetEmptyStateValues();
+ }
+ size_t height = 0;
+ size_t max_nodes = 1;
+ while (count > max_nodes) {
+ height++;
+ max_nodes *= kMaxInputCount;
+ }
+
+ ValueArrayIterator it(values, count);
+
+ Node* tree = BuildTree(&it, height);
+
+ // If the 'tree' is a single node, equip it with a StateValues wrapper.
+ if (tree->opcode() != IrOpcode::kStateValues &&
+ tree->opcode() != IrOpcode::kTypedStateValues) {
+ tree = GetValuesNodeFromCache(&tree, 1);
+ }
+
+ return tree;
+}
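
Editor's note: the height computation above picks the smallest tree height whose capacity (kMaxInputCount ^ height) covers count. For example, with kMaxInputCount == 8, 20 values need height 2 (capacity 64), and BuildTree() then emits three level-1 children of sizes 8, 8 and 4 under one root. The loop in isolation, with explicit parameters:

    #include <cstddef>

    size_t TreeHeight(size_t count, size_t max_input_count) {
      size_t height = 0;
      size_t max_nodes = 1;  // capacity of a tree of the current height
      while (count > max_nodes) {
        height++;
        max_nodes *= max_input_count;
      }
      return height;  // TreeHeight(20, 8) == 2, TreeHeight(1, 8) == 0
    }
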
+
+
+StateValuesAccess::iterator::iterator(Node* node) : current_depth_(0) {
+  // A hacky way to initialize - just set the index before the node we want
+ // to process and then advance to it.
+ stack_[current_depth_].node = node;
+ stack_[current_depth_].index = -1;
+ Advance();
+}
+
+
+StateValuesAccess::iterator::StatePos* StateValuesAccess::iterator::Top() {
+ DCHECK(current_depth_ >= 0);
+ DCHECK(current_depth_ < kMaxInlineDepth);
+ return &(stack_[current_depth_]);
+}
+
+
+void StateValuesAccess::iterator::Push(Node* node) {
+ current_depth_++;
+ CHECK(current_depth_ < kMaxInlineDepth);
+ stack_[current_depth_].node = node;
+ stack_[current_depth_].index = 0;
+}
+
+
+void StateValuesAccess::iterator::Pop() {
+ DCHECK(current_depth_ >= 0);
+ current_depth_--;
+}
+
+
+bool StateValuesAccess::iterator::done() { return current_depth_ < 0; }
+
+
+void StateValuesAccess::iterator::Advance() {
+ // Advance the current index.
+ Top()->index++;
+
+ // Fix up the position to point to a valid node.
+ while (true) {
+ // TODO(jarin): Factor to a separate method.
+ Node* node = Top()->node;
+ int index = Top()->index;
+
+ if (index >= node->InputCount()) {
+ // Pop stack and move to the next sibling.
+ Pop();
+ if (done()) {
+ // Stack is exhausted, we have reached the end.
+ return;
+ }
+ Top()->index++;
+ } else if (node->InputAt(index)->opcode() == IrOpcode::kStateValues ||
+ node->InputAt(index)->opcode() == IrOpcode::kTypedStateValues) {
+ // Nested state, we need to push to the stack.
+ Push(node->InputAt(index));
+ } else {
+ // We are on a valid node, we can stop the iteration.
+ return;
+ }
+ }
+}
+
+
+Node* StateValuesAccess::iterator::node() {
+ return Top()->node->InputAt(Top()->index);
+}
+
+
+MachineType StateValuesAccess::iterator::type() {
+ Node* state = Top()->node;
+ if (state->opcode() == IrOpcode::kStateValues) {
+ return kMachAnyTagged;
+ } else {
+ DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
+ const ZoneVector<MachineType>* types =
+ OpParameter<const ZoneVector<MachineType>*>(state);
+ return (*types)[Top()->index];
+ }
+}
+
+
+bool StateValuesAccess::iterator::operator!=(iterator& other) {
+ // We only allow comparison with end().
+ CHECK(other.done());
+ return !done();
+}
+
+
+StateValuesAccess::iterator& StateValuesAccess::iterator::operator++() {
+ Advance();
+ return *this;
+}
+
+
+StateValuesAccess::TypedNode StateValuesAccess::iterator::operator*() {
+ return TypedNode(node(), type());
+}
+
+
+size_t StateValuesAccess::size() {
+ size_t count = 0;
+ for (int i = 0; i < node_->InputCount(); i++) {
+ if (node_->InputAt(i)->opcode() == IrOpcode::kStateValues ||
+ node_->InputAt(i)->opcode() == IrOpcode::kTypedStateValues) {
+ count += StateValuesAccess(node_->InputAt(i)).size();
+ } else {
+ count++;
+ }
+ }
+ return count;
+}
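
Editor's note: taken together, the iterator gives clients a flat view over arbitrarily nested (Typed)StateValues trees, such as the ones StateValuesCache builds above. A hypothetical usage sketch (the caller and the state_values node are assumed to exist elsewhere):

    size_t CountEntries(Node* state_values) {
      size_t entries = 0;
      for (StateValuesAccess::TypedNode entry : StateValuesAccess(state_values)) {
        // entry.node is the leaf value, entry.type its machine representation
        // (kMachAnyTagged for plain StateValues inputs).
        (void)entry;  // silence unused-variable warnings in this sketch
        entries++;
      }
      return entries;  // matches StateValuesAccess(state_values).size()
    }
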
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/state-values-utils.h b/deps/v8/src/compiler/state-values-utils.h
new file mode 100644
index 0000000000..79550bd3ff
--- /dev/null
+++ b/deps/v8/src/compiler/state-values-utils.h
@@ -0,0 +1,120 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_STATE_VALUES_UTILS_H_
+#define V8_COMPILER_STATE_VALUES_UTILS_H_
+
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+
+class Graph;
+
+class StateValuesCache {
+ public:
+ explicit StateValuesCache(JSGraph* js_graph);
+
+ Node* GetNodeForValues(Node** values, size_t count);
+
+ private:
+ static const size_t kMaxInputCount = 8;
+
+ struct NodeKey {
+ Node* node;
+
+ explicit NodeKey(Node* node) : node(node) {}
+ };
+
+ struct StateValuesKey : public NodeKey {
+ // ValueArray - array of nodes ({node} has to be nullptr).
+ size_t count;
+ Node** values;
+
+ StateValuesKey(size_t count, Node** values)
+ : NodeKey(nullptr), count(count), values(values) {}
+ };
+
+ class ValueArrayIterator;
+
+ static bool AreKeysEqual(void* key1, void* key2);
+ static bool IsKeysEqualToNode(StateValuesKey* key, Node* node);
+ static bool AreValueKeysEqual(StateValuesKey* key1, StateValuesKey* key2);
+
+ Node* BuildTree(ValueArrayIterator* it, size_t max_height);
+ NodeVector* GetWorkingSpace(size_t level);
+ Node* GetEmptyStateValues();
+ Node* GetValuesNodeFromCache(Node** nodes, size_t count);
+
+ Graph* graph() { return js_graph_->graph(); }
+ CommonOperatorBuilder* common() { return js_graph_->common(); }
+
+ Zone* zone() { return graph()->zone(); }
+
+ JSGraph* js_graph_;
+ ZoneHashMap hash_map_;
+ ZoneVector<NodeVector*> working_space_; // One working space per level.
+ Node* empty_state_values_;
+};
+
+class StateValuesAccess {
+ public:
+ struct TypedNode {
+ Node* node;
+ MachineType type;
+ TypedNode(Node* node, MachineType type) : node(node), type(type) {}
+ };
+
+ class iterator {
+ public:
+ // Bare minimum of operators needed for range iteration.
+ bool operator!=(iterator& other);
+ iterator& operator++();
+ TypedNode operator*();
+
+ private:
+ friend class StateValuesAccess;
+
+ iterator() : current_depth_(-1) {}
+ explicit iterator(Node* node);
+
+ Node* node();
+ MachineType type();
+ bool done();
+ void Advance();
+
+ struct StatePos {
+ Node* node;
+ int index;
+
+ explicit StatePos(Node* node) : node(node), index(0) {}
+ StatePos() {}
+ };
+
+ StatePos* Top();
+ void Push(Node* node);
+ void Pop();
+
+ static const int kMaxInlineDepth = 8;
+ StatePos stack_[kMaxInlineDepth];
+ int current_depth_;
+ };
+
+ explicit StateValuesAccess(Node* node) : node_(node) {}
+
+ size_t size();
+ iterator begin() { return iterator(node_); }
+ iterator end() { return iterator(); }
+
+ private:
+ Node* node_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_STATE_VALUES_UTILS_H_
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 9af65597bf..85b0c3e140 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -156,9 +156,7 @@ Typer::Typer(Isolate* isolate, Graph* graph, MaybeHandle<Context> context)
graph_(graph),
context_(context),
decorator_(NULL),
- cache_(new (graph->zone()) LazyTypeCache(isolate, graph->zone())),
- weaken_min_limits_(graph->zone()),
- weaken_max_limits_(graph->zone()) {
+ cache_(new (graph->zone()) LazyTypeCache(isolate, graph->zone())) {
Zone* zone = this->zone();
Factory* f = isolate->factory();
@@ -202,20 +200,6 @@ Typer::Typer(Isolate* isolate, Graph* graph, MaybeHandle<Context> context)
weakint_fun1_ = Type::Function(weakint, number, zone);
random_fun_ = Type::Function(Type::OrderedNumber(), zone);
- const int limits_count = 20;
-
- weaken_min_limits_.reserve(limits_count + 1);
- weaken_max_limits_.reserve(limits_count + 1);
-
- double limit = 1 << 30;
- weaken_min_limits_.push_back(0);
- weaken_max_limits_.push_back(0);
- for (int i = 0; i < limits_count; i++) {
- weaken_min_limits_.push_back(-limit);
- weaken_max_limits_.push_back(limit - 1);
- limit *= 2;
- }
-
decorator_ = new (zone) Decorator(this);
graph_->AddDecorator(decorator_);
}
@@ -228,7 +212,8 @@ Typer::~Typer() {
class Typer::Visitor : public Reducer {
public:
- explicit Visitor(Typer* typer) : typer_(typer) {}
+ explicit Visitor(Typer* typer)
+ : typer_(typer), weakened_nodes_(typer->zone()) {}
Reduction Reduce(Node* node) OVERRIDE {
if (node->op()->ValueOutputCount() == 0) return NoChange();
@@ -296,6 +281,7 @@ class Typer::Visitor : public Reducer {
private:
Typer* typer_;
MaybeHandle<Context> context_;
+ ZoneSet<NodeId> weakened_nodes_;
#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
DECLARE_METHOD(Start)
@@ -313,13 +299,18 @@ class Typer::Visitor : public Reducer {
}
Bounds WrapContextBoundsForInput(Node* node);
- Type* Weaken(Type* current_type, Type* previous_type);
+ Type* Weaken(Node* node, Type* current_type, Type* previous_type);
Zone* zone() { return typer_->zone(); }
Isolate* isolate() { return typer_->isolate(); }
Graph* graph() { return typer_->graph(); }
MaybeHandle<Context> context() { return typer_->context(); }
+ void SetWeakened(NodeId node_id) { weakened_nodes_.insert(node_id); }
+ bool IsWeakened(NodeId node_id) {
+ return weakened_nodes_.find(node_id) != weakened_nodes_.end();
+ }
+
typedef Type* (*UnaryTyperFun)(Type*, Typer* t);
typedef Type* (*BinaryTyperFun)(Type*, Type*, Typer* t);
@@ -365,9 +356,11 @@ class Typer::Visitor : public Reducer {
if (NodeProperties::IsTyped(node)) {
// Widen the bounds of a previously typed node.
Bounds previous = NodeProperties::GetBounds(node);
- // Speed up termination in the presence of range types:
- current.upper = Weaken(current.upper, previous.upper);
- current.lower = Weaken(current.lower, previous.lower);
+ if (node->opcode() == IrOpcode::kPhi) {
+ // Speed up termination in the presence of range types:
+ current.upper = Weaken(node, current.upper, previous.upper);
+ current.lower = Weaken(node, current.lower, previous.lower);
+ }
// Types should not get less precise.
DCHECK(previous.lower->Is(current.lower));
@@ -730,6 +723,11 @@ Bounds Typer::Visitor::TypeStateValues(Node* node) {
}
+Bounds Typer::Visitor::TypeTypedStateValues(Node* node) {
+ return Bounds(Type::None(zone()), Type::Internal(zone()));
+}
+
+
Bounds Typer::Visitor::TypeCall(Node* node) {
return Bounds::Unbounded(zone());
}
@@ -1302,46 +1300,72 @@ Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
// the fixpoint calculation in case there appears to be a loop
// in the graph. In the current implementation, we are
// increasing the limits to the closest power of two.
-Type* Typer::Visitor::Weaken(Type* current_type, Type* previous_type) {
+Type* Typer::Visitor::Weaken(Node* node, Type* current_type,
+ Type* previous_type) {
+ static const double kWeakenMinLimits[] = {
+ 0.0, -1073741824.0, -2147483648.0, -4294967296.0, -8589934592.0,
+ -17179869184.0, -34359738368.0, -68719476736.0, -137438953472.0,
+ -274877906944.0, -549755813888.0, -1099511627776.0, -2199023255552.0,
+ -4398046511104.0, -8796093022208.0, -17592186044416.0, -35184372088832.0,
+ -70368744177664.0, -140737488355328.0, -281474976710656.0,
+ -562949953421312.0};
+ static const double kWeakenMaxLimits[] = {
+ 0.0, 1073741823.0, 2147483647.0, 4294967295.0, 8589934591.0,
+ 17179869183.0, 34359738367.0, 68719476735.0, 137438953471.0,
+ 274877906943.0, 549755813887.0, 1099511627775.0, 2199023255551.0,
+ 4398046511103.0, 8796093022207.0, 17592186044415.0, 35184372088831.0,
+ 70368744177663.0, 140737488355327.0, 281474976710655.0,
+ 562949953421311.0};
+ STATIC_ASSERT(arraysize(kWeakenMinLimits) == arraysize(kWeakenMaxLimits));
+
   // If the previous type has nothing to do with integers, return unchanged.
- if (!current_type->Maybe(typer_->integer) ||
- !previous_type->Maybe(typer_->integer)) {
+ if (!previous_type->Maybe(typer_->integer)) {
return current_type;
}
+ DCHECK(current_type->Maybe(typer_->integer));
- Type* previous_number =
+ Type* current_integer =
+ Type::Intersect(current_type, typer_->integer, zone());
+ Type* previous_integer =
Type::Intersect(previous_type, typer_->integer, zone());
- Type* current_number = Type::Intersect(current_type, typer_->integer, zone());
- if (!current_number->IsRange() || !previous_number->IsRange()) {
- return current_type;
- }
- Type::RangeType* previous = previous_number->AsRange();
- Type::RangeType* current = current_number->AsRange();
+ // Once we start weakening a node, we should always weaken.
+ if (!IsWeakened(node->id())) {
+    // Only weaken if a range is involved; we should converge quickly
+ // for all other types (the exception is a union of many constants,
+ // but we currently do not increase the number of constants in unions).
+ Type::RangeType* previous = previous_integer->GetRange();
+ Type::RangeType* current = current_integer->GetRange();
+ if (current == nullptr || previous == nullptr) {
+ return current_type;
+ }
+ // Range is involved => we are weakening.
+ SetWeakened(node->id());
+ }
- double current_min = current->Min();
+ double current_min = current_integer->Min();
double new_min = current_min;
// Find the closest lower entry in the list of allowed
// minima (or negative infinity if there is no such entry).
- if (current_min != previous->Min()) {
+ if (current_min != previous_integer->Min()) {
new_min = typer_->integer->AsRange()->Min();
- for (const auto val : typer_->weaken_min_limits_) {
- if (val <= current_min) {
- new_min = val;
+ for (double const min : kWeakenMinLimits) {
+ if (min <= current_min) {
+ new_min = min;
break;
}
}
}
- double current_max = current->Max();
+ double current_max = current_integer->Max();
double new_max = current_max;
// Find the closest greater entry in the list of allowed
// maxima (or infinity if there is no such entry).
- if (current_max != previous->Max()) {
+ if (current_max != previous_integer->Max()) {
new_max = typer_->integer->AsRange()->Max();
- for (const auto val : typer_->weaken_max_limits_) {
- if (val >= current_max) {
- new_max = val;
+ for (double const max : kWeakenMaxLimits) {
+ if (max >= current_max) {
+ new_max = max;
break;
}
}
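
The two static tables replace the weaken_min_limits_/weaken_max_limits_ ZoneVectors that the Typer constructor used to fill at runtime; each entry doubles the previous one, starting at 2^30, and every maximum sits one below a power of two. A small stand-alone check that regenerates the literals, handy for verifying the tables above:

    #include <cassert>
    #include <cstdio>

    // Regenerates the 21-entry limit tables from Typer::Visitor::Weaken:
    // entry 0 is 0, entry i (i >= 1) is -2^(29 + i) for the minima and
    // 2^(29 + i) - 1 for the maxima.
    int main() {
      const int kLimitsCount = 21;
      double min_limits[kLimitsCount];
      double max_limits[kLimitsCount];
      min_limits[0] = max_limits[0] = 0.0;
      double limit = 1073741824.0;  // 2^30, the first weakening step
      for (int i = 1; i < kLimitsCount; i++) {
        min_limits[i] = -limit;
        max_limits[i] = limit - 1;
        limit *= 2;
      }
      // Spot-check against the literals in kWeakenMinLimits/kWeakenMaxLimits.
      assert(min_limits[1] == -1073741824.0);
      assert(max_limits[1] == 1073741823.0);
      assert(min_limits[20] == -562949953421312.0);
      assert(max_limits[20] == 562949953421311.0);
      std::printf("tables match\n");
    }
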
@@ -1506,6 +1530,23 @@ Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
case Runtime::kInlineIsFunction:
case Runtime::kInlineIsRegExp:
return Bounds(Type::None(zone()), Type::Boolean(zone()));
+ case Runtime::kInlineDoubleLo:
+ case Runtime::kInlineDoubleHi:
+ return Bounds(Type::None(zone()), Type::Signed32());
+ case Runtime::kInlineConstructDouble:
+ case Runtime::kInlineMathFloor:
+ case Runtime::kInlineMathSqrt:
+ case Runtime::kInlineMathAcos:
+ case Runtime::kInlineMathAsin:
+ case Runtime::kInlineMathAtan:
+ case Runtime::kInlineMathAtan2:
+ return Bounds(Type::None(zone()), Type::Number());
+ case Runtime::kInlineMathClz32:
+ return Bounds(Type::None(), Type::Range(0, 32, zone()));
+ case Runtime::kInlineStringGetLength:
+ // The string::length property is always an unsigned smi.
+ return Bounds(Type::None(), Type::Intersect(Type::UnsignedSmall(),
+ Type::TaggedSigned()));
default:
break;
}
@@ -1513,7 +1554,7 @@ Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
}
-Bounds Typer::Visitor::TypeJSDebugger(Node* node) {
+Bounds Typer::Visitor::TypeJSStackCheck(Node* node) {
return Bounds::Unbounded(zone());
}
@@ -1521,11 +1562,6 @@ Bounds Typer::Visitor::TypeJSDebugger(Node* node) {
// Simplified operators.
-Bounds Typer::Visitor::TypeAnyToBoolean(Node* node) {
- return TypeUnaryOp(node, ToBoolean);
-}
-
-
Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
return Bounds(Type::None(zone()), Type::Boolean(zone()));
}
@@ -1616,16 +1652,15 @@ Bounds Typer::Visitor::TypeStringAdd(Node* node) {
}
-static Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
- // TODO(neis): Enable when expressible.
- /*
- return Type::Union(
- Type::Intersect(type, Type::Semantic(), zone),
- Type::Intersect(rep, Type::Representation(), zone), zone);
- */
- return type;
+namespace {
+
+Type* ChangeRepresentation(Type* type, Type* rep, Zone* zone) {
+ return Type::Union(Type::Semantic(type, zone),
+ Type::Representation(rep, zone), zone);
}
+} // namespace
+
Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
Bounds arg = Operand(node, 0);
@@ -1657,9 +1692,12 @@ Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
Bounds arg = Operand(node, 0);
// TODO(neis): DCHECK(arg.upper->Is(Type::Signed32()));
- return Bounds(
- ChangeRepresentation(arg.lower, Type::Tagged(), zone()),
- ChangeRepresentation(arg.upper, Type::Tagged(), zone()));
+ Type* lower_rep = arg.lower->Is(Type::SignedSmall()) ? Type::TaggedSigned()
+ : Type::Tagged();
+ Type* upper_rep = arg.upper->Is(Type::SignedSmall()) ? Type::TaggedSigned()
+ : Type::Tagged();
+ return Bounds(ChangeRepresentation(arg.lower, lower_rep, zone()),
+ ChangeRepresentation(arg.upper, upper_rep, zone()));
}
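
TypeChangeInt32ToTagged now refines the representation per bound: a value provably within SignedSmall can be tagged as a Smi (TaggedSigned) rather than as an arbitrary tagged value. A stand-alone illustration of that per-range choice; the 31-bit Smi range below is an assumption for the sketch, since the real Smi width depends on the platform:

    #include <cstdint>
    #include <cstdio>

    enum class Rep { kTaggedSigned, kTagged };

    // Mirrors the decision in TypeChangeInt32ToTagged: if every value in
    // [min, max] fits in a Smi, the cheaper TaggedSigned representation is
    // safe; otherwise fall back to the generic Tagged representation.
    Rep RepresentationFor(int64_t min, int64_t max) {
      const int64_t kSmiMin = -(int64_t{1} << 30);  // 31-bit Smi assumption
      const int64_t kSmiMax = (int64_t{1} << 30) - 1;
      bool is_smi = min >= kSmiMin && max <= kSmiMax;
      return is_smi ? Rep::kTaggedSigned : Rep::kTagged;
    }

    int main() {
      std::printf("%d\n", RepresentationFor(0, 1000) == Rep::kTaggedSigned);
      std::printf("%d\n", RepresentationFor(0, int64_t{1} << 40) == Rep::kTagged);
    }
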
@@ -1804,6 +1842,11 @@ Bounds Typer::Visitor::TypeWord32Equal(Node* node) {
}
+Bounds Typer::Visitor::TypeWord32Clz(Node* node) {
+ return Bounds(Type::Integral32());
+}
+
+
Bounds Typer::Visitor::TypeWord64And(Node* node) {
return Bounds(Type::Internal());
}
@@ -2052,6 +2095,16 @@ Bounds Typer::Visitor::TypeFloat64Mod(Node* node) {
}
+Bounds Typer::Visitor::TypeFloat64Max(Node* node) {
+ return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Min(Node* node) {
+ return Bounds(Type::Number());
+}
+
+
Bounds Typer::Visitor::TypeFloat64Sqrt(Node* node) {
return Bounds(Type::Number());
}
@@ -2072,26 +2125,40 @@ Bounds Typer::Visitor::TypeFloat64LessThanOrEqual(Node* node) {
}
-Bounds Typer::Visitor::TypeFloat64Floor(Node* node) {
+Bounds Typer::Visitor::TypeFloat64RoundDown(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Bounds(Type::Number());
}
-Bounds Typer::Visitor::TypeFloat64Ceil(Node* node) {
+Bounds Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Bounds(Type::Number());
}
-Bounds Typer::Visitor::TypeFloat64RoundTruncate(Node* node) {
+Bounds Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
// TODO(sigurds): We could have a tighter bound here.
return Bounds(Type::Number());
}
-Bounds Typer::Visitor::TypeFloat64RoundTiesAway(Node* node) {
- // TODO(sigurds): We could have a tighter bound here.
+Bounds Typer::Visitor::TypeFloat64ExtractLowWord32(Node* node) {
+ return Bounds(Type::Signed32());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64ExtractHighWord32(Node* node) {
+ return Bounds(Type::Signed32());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64InsertLowWord32(Node* node) {
+ return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeFloat64InsertHighWord32(Node* node) {
return Bounds(Type::Number());
}
diff --git a/deps/v8/src/compiler/typer.h b/deps/v8/src/compiler/typer.h
index a288d060a6..4c04ddb973 100644
--- a/deps/v8/src/compiler/typer.h
+++ b/deps/v8/src/compiler/typer.h
@@ -6,7 +6,6 @@
#define V8_COMPILER_TYPER_H_
#include "src/compiler/graph.h"
-#include "src/compiler/opcodes.h"
#include "src/types.h"
namespace v8 {
@@ -63,8 +62,6 @@ class Typer {
Type* random_fun_;
LazyTypeCache* cache_;
- ZoneVector<double> weaken_min_limits_;
- ZoneVector<double> weaken_max_limits_;
DISALLOW_COPY_AND_ASSIGN(Typer);
};
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 9480afb0e2..0768ece2a3 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -125,8 +125,8 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(input_count, node->InputCount());
// Verify that frame state has been inserted for the nodes that need it.
- if (OperatorProperties::HasFrameStateInput(node->op())) {
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ for (int i = 0; i < frame_state_count; i++) {
+ Node* frame_state = NodeProperties::GetFrameStateInput(node, i);
CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
// kFrameState uses undefined as a sentinel.
(node->opcode() == IrOpcode::kFrameState &&
@@ -228,6 +228,15 @@ void Verifier::Visitor::Check(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kIfSuccess:
+ case IrOpcode::kIfException: {
+        // IfSuccess and IfException must follow a throwing node.
+ Node* input = NodeProperties::GetControlInput(node, 0);
+ CHECK(!input->op()->HasProperty(Operator::kNoThrow));
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
+ }
case IrOpcode::kSwitch: {
// Switch uses are Case and Default.
int count_case = 0, count_default = 0;
@@ -273,6 +282,10 @@ void Verifier::Visitor::Check(Node* node) {
// Type is empty.
CheckNotTyped(node);
break;
+ case IrOpcode::kDeoptimize:
+ // TODO(rossberg): check successor is End
+ // Type is empty.
+ CheckNotTyped(node);
case IrOpcode::kReturn:
// TODO(rossberg): check successor is End
// Type is empty.
@@ -414,6 +427,7 @@ void Verifier::Visitor::Check(Node* node) {
// TODO(jarin): what are the constraints on these?
break;
case IrOpcode::kStateValues:
+ case IrOpcode::kTypedStateValues:
// TODO(jarin): what are the constraints on these?
break;
case IrOpcode::kCall:
@@ -532,17 +546,17 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kJSCallFunction:
case IrOpcode::kJSCallRuntime:
case IrOpcode::kJSYield:
- case IrOpcode::kJSDebugger:
// Type can be anything.
CheckUpperIs(node, Type::Any());
break;
+ case IrOpcode::kJSStackCheck:
+ // Type is empty.
+ CheckNotTyped(node);
+ break;
+
// Simplified operators
// -------------------------------
- case IrOpcode::kAnyToBoolean:
- // Type is Boolean.
- CheckUpperIs(node, Type::Boolean());
- break;
case IrOpcode::kBooleanNot:
// Boolean -> Boolean
CheckValueInputIs(node, 0, Type::Boolean());
@@ -604,10 +618,6 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kReferenceEqual: {
// (Unique, Any) -> Boolean and
// (Any, Unique) -> Boolean
- if (typing == TYPED) {
- CHECK(bounds(ValueInput(node, 0)).upper->Is(Type::Unique()) ||
- bounds(ValueInput(node, 1)).upper->Is(Type::Unique()));
- }
CheckUpperIs(node, Type::Boolean());
break;
}
@@ -736,6 +746,7 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kWord32Sar:
case IrOpcode::kWord32Ror:
case IrOpcode::kWord32Equal:
+ case IrOpcode::kWord32Clz:
case IrOpcode::kWord64And:
case IrOpcode::kWord64Or:
case IrOpcode::kWord64Xor:
@@ -774,9 +785,10 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kFloat64Mul:
case IrOpcode::kFloat64Div:
case IrOpcode::kFloat64Mod:
+ case IrOpcode::kFloat64Max:
+ case IrOpcode::kFloat64Min:
case IrOpcode::kFloat64Sqrt:
- case IrOpcode::kFloat64Floor:
- case IrOpcode::kFloat64Ceil:
+ case IrOpcode::kFloat64RoundDown:
case IrOpcode::kFloat64RoundTruncate:
case IrOpcode::kFloat64RoundTiesAway:
case IrOpcode::kFloat64Equal:
@@ -792,13 +804,17 @@ void Verifier::Visitor::Check(Node* node) {
case IrOpcode::kChangeFloat32ToFloat64:
case IrOpcode::kChangeFloat64ToInt32:
case IrOpcode::kChangeFloat64ToUint32:
+ case IrOpcode::kFloat64ExtractLowWord32:
+ case IrOpcode::kFloat64ExtractHighWord32:
+ case IrOpcode::kFloat64InsertLowWord32:
+ case IrOpcode::kFloat64InsertHighWord32:
case IrOpcode::kLoadStackPointer:
case IrOpcode::kCheckedLoad:
case IrOpcode::kCheckedStore:
// TODO(rossberg): Check.
break;
}
-}
+} // NOLINT(readability/fn_size)
void Verifier::Run(Graph* graph, Typing typing) {
@@ -806,7 +822,23 @@ void Verifier::Run(Graph* graph, Typing typing) {
CHECK_NOT_NULL(graph->end());
Zone zone;
Visitor visitor(&zone, typing);
- for (Node* node : AllNodes(&zone, graph).live) visitor.Check(node);
+ AllNodes all(&zone, graph);
+ for (Node* node : all.live) visitor.Check(node);
+
+ // Check the uniqueness of projections.
+ for (Node* proj : all.live) {
+ if (proj->opcode() != IrOpcode::kProjection) continue;
+ Node* node = proj->InputAt(0);
+ for (Node* other : node->uses()) {
+ if (all.IsLive(other) && other != proj &&
+ other->opcode() == IrOpcode::kProjection &&
+ ProjectionIndexOf(other->op()) == ProjectionIndexOf(proj->op())) {
+ V8_Fatal(__FILE__, __LINE__,
+ "Node #%d:%s has duplicate projections #%d and #%d",
+ node->id(), node->op()->mnemonic(), proj->id(), other->id());
+ }
+ }
+ }
}
@@ -856,7 +888,7 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
use_pos)) {
V8_Fatal(__FILE__, __LINE__,
"Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
- node->id(), node->op()->mnemonic(), block->id().ToInt(), j,
+ node->id(), node->op()->mnemonic(), block->rpo_number(), j,
input->id(), input->op()->mnemonic());
}
}
@@ -869,8 +901,8 @@ static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
if (!Dominates(schedule, ctl, node)) {
V8_Fatal(__FILE__, __LINE__,
"Node #%d:%s in B%d is not dominated by control input #%d:%s",
- node->id(), node->op()->mnemonic(), block->id(), ctl->id(),
- ctl->op()->mnemonic());
+ node->id(), node->op()->mnemonic(), block->rpo_number(),
+ ctl->id(), ctl->op()->mnemonic());
}
}
}
@@ -964,7 +996,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
BasicBlock* idom = block->dominator();
if (idom != NULL && !block_doms->Contains(idom->id().ToInt())) {
V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
- block->id().ToInt(), idom->id().ToInt());
+ block->rpo_number(), idom->rpo_number());
}
for (size_t s = 0; s < block->SuccessorCount(); s++) {
BasicBlock* succ = block->SuccessorAt(s);
@@ -1002,7 +1034,7 @@ void ScheduleVerifier::Run(Schedule* schedule) {
!dominators[idom->id().ToSize()]->Contains(dom->id().ToInt())) {
V8_Fatal(__FILE__, __LINE__,
"Block B%d is not immediately dominated by B%d",
- block->id().ToInt(), idom->id().ToInt());
+ block->rpo_number(), idom->rpo_number());
}
}
}
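
The new pass at the end of Verifier::Run rejects graphs in which two live Projection nodes extract the same output index from the same producer, since such duplicates indicate a malformed graph. A stand-alone sketch of the duplicate check over a flat list of (producer, index) pairs; the toy types are illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Toy projection record: which node it projects from, and which
    // output index it extracts.
    struct Projection {
      int producer;
      size_t index;
    };

    // True if any producer has two projections with the same index,
    // mirroring the uniqueness check added to Verifier::Run.
    bool HasDuplicateProjections(const std::vector<Projection>& projections) {
      for (size_t i = 0; i < projections.size(); ++i) {
        for (size_t j = i + 1; j < projections.size(); ++j) {
          if (projections[i].producer == projections[j].producer &&
              projections[i].index == projections[j].index) {
            return true;
          }
        }
      }
      return false;
    }

    int main() {
      std::printf("%d\n", HasDuplicateProjections({{7, 0}, {7, 1}}));  // 0
      std::printf("%d\n", HasDuplicateProjections({{7, 0}, {7, 0}}));  // 1
    }
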
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 973bbd1ef0..3160734788 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -18,17 +18,22 @@ namespace compiler {
#define __ masm()->
+#define kScratchDoubleReg xmm0
+
+
// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {
public:
X64OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
- Immediate InputImmediate(int index) {
+ Immediate InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
- Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+ Operand InputOperand(size_t index, int extra = 0) {
+ return ToOperand(instr_->InputAt(index), extra);
+ }
Operand OutputOperand() { return ToOperand(instr_->Output()); }
@@ -43,8 +48,8 @@ class X64OperandConverter : public InstructionOperandConverter {
return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
}
- static int NextOffset(int* offset) {
- int i = *offset;
+ static size_t NextOffset(size_t* offset) {
+ size_t i = *offset;
(*offset)++;
return i;
}
@@ -59,7 +64,7 @@ class X64OperandConverter : public InstructionOperandConverter {
return static_cast<ScaleFactor>(scale);
}
- Operand MemoryOperand(int* offset) {
+ Operand MemoryOperand(size_t* offset) {
AddressingMode mode = AddressingModeField::decode(instr_->opcode());
switch (mode) {
case kMode_MR: {
@@ -124,7 +129,7 @@ class X64OperandConverter : public InstructionOperandConverter {
return Operand(no_reg, 0);
}
- Operand MemoryOperand(int first_input = 0) {
+ Operand MemoryOperand(size_t first_input = 0) {
return MemoryOperand(&first_input);
}
};
@@ -132,7 +137,7 @@ class X64OperandConverter : public InstructionOperandConverter {
namespace {
-bool HasImmediateInput(Instruction* instr, int index) {
+bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
@@ -526,7 +531,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
int entry = Code::kHeaderSize - kHeapObjectTag;
__ Call(Operand(reg, entry));
}
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchCallJSFunction: {
@@ -538,7 +543,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Assert(equal, kWrongFunctionContext);
}
__ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
- AddSafepointAndDeopt(instr);
+ RecordCallPosition(instr);
break;
}
case kArchJmp:
@@ -553,6 +558,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchNop:
// don't emit code for nops.
break;
+ case kArchDeoptimize: {
+ int deopt_state_id =
+ BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
+ AssembleDeoptimizerCall(deopt_state_id, Deoptimizer::EAGER);
+ break;
+ }
case kArchRet:
AssembleReturn();
break;
@@ -683,6 +694,13 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kX64Ror:
ASSEMBLE_SHIFT(rorq, 6);
break;
+ case kX64Lzcnt32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ Lzcntl(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ Lzcntl(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
case kSSEFloat64Cmp:
ASSEMBLE_DOUBLE_BINOP(ucomisd);
break;
@@ -730,6 +748,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ addq(rsp, Immediate(kDoubleSize));
break;
}
+ case kSSEFloat64Max:
+ ASSEMBLE_DOUBLE_BINOP(maxsd);
+ break;
+ case kSSEFloat64Min:
+ ASSEMBLE_DOUBLE_BINOP(minsd);
+ break;
case kSSEFloat64Sqrt:
if (instr->InputAt(0)->IsDoubleRegister()) {
__ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
@@ -737,22 +761,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
- case kSSEFloat64Floor: {
+ case kSSEFloat64Round: {
CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundDown);
- break;
- }
- case kSSEFloat64Ceil: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundUp);
- break;
- }
- case kSSEFloat64RoundTruncate: {
- CpuFeatureScope sse_scope(masm(), SSE4_1);
- __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- v8::internal::Assembler::kRoundToZero);
+ RoundingMode const mode =
+ static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
+ __ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
case kSSECvtss2sd:
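
The kSSEFloat64Round case above replaces three dedicated opcodes (Floor, Ceil, RoundTruncate) with one opcode that carries its rounding mode in the instruction's MiscField. A minimal encode/decode round trip showing the idea; the bit layout here is simplified and illustrative, not V8's actual InstructionCode encoding:

    #include <cassert>
    #include <cstdint>

    enum RoundingMode { kRoundDown = 0, kRoundUp = 1, kRoundToZero = 2 };

    // Simplified instruction word: low 8 bits hold the arch opcode,
    // bits 8+ hold the misc field (enough room for a RoundingMode).
    const uint32_t kOpcodeMask = 0xff;
    const unsigned kMiscShift = 8;

    uint32_t Encode(uint32_t arch_opcode, RoundingMode mode) {
      return (arch_opcode & kOpcodeMask) |
             (static_cast<uint32_t>(mode) << kMiscShift);
    }

    RoundingMode DecodeMisc(uint32_t instr) {
      return static_cast<RoundingMode>(instr >> kMiscShift);
    }

    int main() {
      const uint32_t kSSEFloat64Round = 42;  // illustrative opcode number
      uint32_t instr = Encode(kSSEFloat64Round, kRoundToZero);
      assert((instr & kOpcodeMask) == kSSEFloat64Round);
      assert(DecodeMisc(instr) == kRoundToZero);  // same mode comes back out
    }
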
@@ -800,6 +813,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
break;
+ case kSSEFloat64ExtractLowWord32:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movl(i.OutputRegister(), i.InputOperand(0));
+ } else {
+ __ movd(i.OutputRegister(), i.InputDoubleRegister(0));
+ }
+ break;
+ case kSSEFloat64ExtractHighWord32:
+ if (instr->InputAt(0)->IsDoubleStackSlot()) {
+ __ movl(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
+ } else {
+ __ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
+ }
+ break;
+ case kSSEFloat64InsertLowWord32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 0);
+ } else {
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0);
+ }
+ break;
+ case kSSEFloat64InsertHighWord32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputRegister(1), 1);
+ } else {
+ __ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1);
+ }
+ break;
+ case kSSEFloat64LoadLowWord32:
+ if (instr->InputAt(0)->IsRegister()) {
+ __ movd(i.OutputDoubleRegister(), i.InputRegister(0));
+ } else {
+ __ movd(i.OutputDoubleRegister(), i.InputOperand(0));
+ }
+ break;
case kAVXFloat64Add:
ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
break;
@@ -812,6 +860,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kAVXFloat64Div:
ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
break;
+ case kAVXFloat64Max:
+ ASSEMBLE_AVX_DOUBLE_BINOP(vmaxsd);
+ break;
+ case kAVXFloat64Min:
+ ASSEMBLE_AVX_DOUBLE_BINOP(vminsd);
+ break;
case kX64Movsxbl:
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
@@ -821,7 +875,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movb: {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movb(operand, Immediate(i.InputInt8(index)));
@@ -839,7 +893,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ AssertZeroExtended(i.OutputRegister());
break;
case kX64Movw: {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movw(operand, Immediate(i.InputInt16(index)));
@@ -861,7 +915,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
__ AssertZeroExtended(i.OutputRegister());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movl(operand, i.InputImmediate(index));
@@ -877,7 +931,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->HasOutput()) {
__ movq(i.OutputRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ movq(operand, i.InputImmediate(index));
@@ -890,7 +944,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movss(operand, i.InputDoubleRegister(index));
}
@@ -899,7 +953,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
if (instr->HasOutput()) {
__ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
- int index = 0;
+ size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movsd(operand, i.InputDoubleRegister(index));
}
@@ -1005,8 +1059,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
break;
+ case kX64StackCheck:
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ break;
}
-}
+} // NOLINT(readability/fn_size)
// Assembles branches after this instruction.
@@ -1064,7 +1121,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
}
-void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
}
@@ -1078,8 +1135,8 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// Materialize a full 64-bit 1 or 0 value. The result register is always the
// last output of the instruction.
Label check;
- DCHECK_NE(0, static_cast<int>(instr->OutputCount()));
- Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
+ DCHECK_NE(0u, instr->OutputCount());
+ Register reg = i.OutputRegister(instr->OutputCount() - 1);
Condition cc = no_condition;
switch (condition) {
case kUnorderedEqual:
@@ -1140,8 +1197,8 @@ void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
X64OperandConverter i(this, instr);
Register input = i.InputRegister(0);
for (size_t index = 2; index < instr->InputCount(); index += 2) {
- __ cmpl(input, Immediate(i.InputInt32(static_cast<int>(index + 0))));
- __ j(equal, GetLabel(i.InputRpo(static_cast<int>(index + 1))));
+ __ cmpl(input, Immediate(i.InputInt32(index + 0)));
+ __ j(equal, GetLabel(i.InputRpo(index + 1)));
}
AssembleArchJump(i.InputRpo(1));
}
@@ -1163,9 +1220,10 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
}
-void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+void CodeGenerator::AssembleDeoptimizerCall(
+ int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
- isolate(), deoptimization_id, Deoptimizer::LAZY);
+ isolate(), deoptimization_id, bailout_type);
__ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}
@@ -1207,6 +1265,8 @@ void CodeGenerator::AssemblePrologue() {
// remaining stack slots.
if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
+ // TODO(titzer): cannot address target function == local #-1
+ __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
stack_slots -= frame()->GetOsrStackSlotCount();
}
@@ -1307,9 +1367,18 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
case Constant::kExternalReference:
__ Move(dst, src.ToExternalReference());
break;
- case Constant::kHeapObject:
- __ Move(dst, src.ToHeapObject());
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ if (info()->IsOptimizing() &&
+ src_object.is_identical_to(info()->context())) {
+ // Loading the context from the frame is way cheaper than
+ // materializing the actual context heap object address.
+ __ movp(dst, Operand(rbp, StandardFrameConstants::kContextOffset));
+ } else {
+ __ Move(dst, src_object);
+ }
break;
+ }
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): load of labels on x64.
break;
@@ -1342,7 +1411,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
XMMRegister src = g.ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(dst, src);
+ __ movaps(dst, src);
} else {
DCHECK(destination->IsDoubleStackSlot());
Operand dst = g.ToOperand(destination);
@@ -1393,9 +1462,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
// available as a fixed scratch register.
XMMRegister src = g.ToDoubleRegister(source);
XMMRegister dst = g.ToDoubleRegister(destination);
- __ movsd(xmm0, src);
- __ movsd(src, dst);
- __ movsd(dst, xmm0);
+ __ movaps(xmm0, src);
+ __ movaps(src, dst);
+ __ movaps(dst, xmm0);
} else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 77e3e52158..9416017af8 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -46,6 +46,7 @@ namespace compiler {
V(X64Sar32) \
V(X64Ror) \
V(X64Ror32) \
+ V(X64Lzcnt32) \
V(SSEFloat64Cmp) \
V(SSEFloat64Add) \
V(SSEFloat64Sub) \
@@ -53,19 +54,26 @@ namespace compiler {
V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat64Sqrt) \
- V(SSEFloat64Floor) \
- V(SSEFloat64Ceil) \
- V(SSEFloat64RoundTruncate) \
+ V(SSEFloat64Round) \
+ V(SSEFloat64Max) \
+ V(SSEFloat64Min) \
V(SSECvtss2sd) \
V(SSECvtsd2ss) \
V(SSEFloat64ToInt32) \
V(SSEFloat64ToUint32) \
V(SSEInt32ToFloat64) \
V(SSEUint32ToFloat64) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
V(AVXFloat64Add) \
V(AVXFloat64Sub) \
V(AVXFloat64Mul) \
V(AVXFloat64Div) \
+ V(AVXFloat64Max) \
+ V(AVXFloat64Min) \
V(X64Movsxbl) \
V(X64Movzxbl) \
V(X64Movb) \
@@ -82,7 +90,8 @@ namespace compiler {
V(X64Dec32) \
V(X64Inc32) \
V(X64Push) \
- V(X64StoreWriteBarrier)
+ V(X64StoreWriteBarrier) \
+ V(X64StackCheck)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index db2d8cb08c..a948257acf 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -346,14 +346,13 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
outputs[output_count++] = g.DefineAsRegister(cont->result());
}
- DCHECK_NE(0, static_cast<int>(input_count));
- DCHECK_NE(0, static_cast<int>(output_count));
+ DCHECK_NE(0u, input_count);
+ DCHECK_NE(0u, output_count);
DCHECK_GE(arraysize(inputs), input_count);
DCHECK_GE(arraysize(outputs), output_count);
- Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
- outputs, input_count, inputs);
- if (cont->IsBranch()) instr->MarkAsControl();
+ selector->Emit(cont->Encode(opcode), output_count, outputs, input_count,
+ inputs);
}
@@ -471,7 +470,7 @@ void EmitLea(InstructionSelector* selector, InstructionCode opcode,
AddressingMode mode = g.GenerateMemoryOperandInputs(
index, scale, base, displacement, inputs, &input_count);
- DCHECK_NE(0, static_cast<int>(input_count));
+ DCHECK_NE(0u, input_count);
DCHECK_GE(arraysize(inputs), input_count);
InstructionOperand outputs[1];
@@ -555,6 +554,12 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
}
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
void InstructionSelector::VisitInt32Add(Node* node) {
X64OperandGenerator g(this);
@@ -844,6 +849,19 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
void InstructionSelector::VisitFloat64Sub(Node* node) {
X64OperandGenerator g(this);
+ Float64BinopMatcher m(node);
+ if (m.left().IsMinusZero() && m.right().IsFloat64RoundDown() &&
+ CanCover(m.node(), m.right().node())) {
+ if (m.right().InputAt(0)->opcode() == IrOpcode::kFloat64Sub &&
+ CanCover(m.right().node(), m.right().InputAt(0))) {
+ Float64BinopMatcher mright0(m.right().InputAt(0));
+ if (mright0.left().IsMinusZero()) {
+ Emit(kSSEFloat64Round | MiscField::encode(kRoundUp),
+ g.DefineAsRegister(node), g.UseRegister(mright0.right().node()));
+ return;
+ }
+ }
+ }
if (IsSupported(AVX)) {
Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
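
The new pattern match in VisitFloat64Sub recognizes -0 - RoundDown(-0 - x) and emits a single roundsd with kRoundUp: this is the standard identity ceil(x) = -floor(-x), with negation spelled as subtraction from -0 because the machine IR of this era has no Float64Neg operator (and, unlike 0 - y, -0 - y preserves the sign of a zero result). A quick stand-alone check of the identity:

    #include <cassert>
    #include <cmath>

    // ceil(x) recovered from floor via the identity matched in
    // VisitFloat64Sub: -0 - RoundDown(-0 - x) == RoundUp(x).
    double CeilViaFloor(double x) { return -0.0 - std::floor(-0.0 - x); }

    int main() {
      assert(CeilViaFloor(2.25) == 3.0);
      assert(CeilViaFloor(-2.25) == -2.0);
      assert(CeilViaFloor(5.0) == 5.0);
      // The -0 spelling keeps the zero sign right: ceil(-0.5) is -0.0.
      assert(std::signbit(CeilViaFloor(-0.5)));
    }
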
@@ -887,6 +905,30 @@ void InstructionSelector::VisitFloat64Mod(Node* node) {
}
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ X64OperandGenerator g(this);
+ if (IsSupported(AVX)) {
+ Emit(kAVXFloat64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ } else {
+ Emit(kSSEFloat64Max, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ }
+}
+
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ X64OperandGenerator g(this);
+ if (IsSupported(AVX)) {
+ Emit(kAVXFloat64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ } else {
+ Emit(kSSEFloat64Min, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+ }
+}
+
+
void InstructionSelector::VisitFloat64Sqrt(Node* node) {
X64OperandGenerator g(this);
Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
@@ -895,7 +937,7 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
namespace {
-void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+void VisitRRFloat64(InstructionSelector* selector, InstructionCode opcode,
Node* node) {
X64OperandGenerator g(selector);
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -905,21 +947,14 @@ void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
} // namespace
-void InstructionSelector::VisitFloat64Floor(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Floor, node);
-}
-
-
-void InstructionSelector::VisitFloat64Ceil(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64Ceil, node);
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundDown), node);
}
void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
- DCHECK(CpuFeatures::IsSupported(SSE4_1));
- VisitRRFloat64(this, kSSEFloat64RoundTruncate, node);
+ VisitRRFloat64(this, kSSEFloat64Round | MiscField::encode(kRoundToZero),
+ node);
}
@@ -928,7 +963,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
-void InstructionSelector::VisitCall(Node* node) {
+void InstructionSelector::VisitCall(Node* node, BasicBlock* handler) {
X64OperandGenerator g(this);
const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
@@ -954,6 +989,13 @@ void InstructionSelector::VisitCall(Node* node) {
Emit(kX64Push, g.NoOutput(), value);
}
+ // Pass label of exception handler block.
+ CallDescriptor::Flags flags = descriptor->flags();
+ if (handler != nullptr) {
+ flags |= CallDescriptor::kHasExceptionHandler;
+ buffer.instruction_args.push_back(g.Label(handler));
+ }
+
// Select the appropriate opcode based on the call type.
InstructionCode opcode;
switch (descriptor->kind()) {
@@ -968,7 +1010,7 @@ void InstructionSelector::VisitCall(Node* node) {
UNREACHABLE();
return;
}
- opcode |= MiscField::encode(descriptor->flags());
+ opcode |= MiscField::encode(flags);
// Emit the call instruction.
InstructionOperand* first_output =
@@ -980,16 +1022,17 @@ void InstructionSelector::VisitCall(Node* node) {
}
+namespace {
+
// Shared routine for multiple compare operations.
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- InstructionOperand left, InstructionOperand right,
- FlagsContinuation* cont) {
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ InstructionOperand left, InstructionOperand right,
+ FlagsContinuation* cont) {
X64OperandGenerator g(selector);
opcode = cont->Encode(opcode);
if (cont->IsBranch()) {
selector->Emit(opcode, g.NoOutput(), left, right,
- g.Label(cont->true_block()),
- g.Label(cont->false_block()))->MarkAsControl();
+ g.Label(cont->true_block()), g.Label(cont->false_block()));
} else {
DCHECK(cont->IsSet());
selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
@@ -998,9 +1041,9 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
// Shared routine for multiple compare operations.
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
- Node* left, Node* right, FlagsContinuation* cont,
- bool commutative) {
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+ Node* left, Node* right, FlagsContinuation* cont,
+ bool commutative) {
X64OperandGenerator g(selector);
if (commutative && g.CanBeBetterLeftOperand(right)) {
std::swap(left, right);
@@ -1010,8 +1053,8 @@ static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
// Shared routine for multiple word compare operations.
-static void VisitWordCompare(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
X64OperandGenerator g(selector);
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
@@ -1029,22 +1072,51 @@ static void VisitWordCompare(InstructionSelector* selector, Node* node,
}
+// Shared routine for 64-bit word comparison operations.
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ X64OperandGenerator g(selector);
+ Int64BinopMatcher m(node);
+ if (m.left().IsLoad() && m.right().IsLoadStackPointer()) {
+ LoadMatcher<ExternalReferenceMatcher> mleft(m.left().node());
+ ExternalReference js_stack_limit =
+ ExternalReference::address_of_stack_limit(selector->isolate());
+ if (mleft.object().Is(js_stack_limit) && mleft.index().Is(0)) {
+ // Compare(Load(js_stack_limit), LoadStackPointer)
+ if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+ InstructionCode opcode = cont->Encode(kX64StackCheck);
+ if (cont->IsBranch()) {
+ selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
+ g.Label(cont->false_block()));
+ } else {
+ DCHECK(cont->IsSet());
+ selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+ }
+ return;
+ }
+ }
+ VisitWordCompare(selector, node, kX64Cmp, cont);
+}
+
+
// Shared routine for comparison with zero.
-static void VisitCompareZero(InstructionSelector* selector, Node* node,
- InstructionCode opcode, FlagsContinuation* cont) {
+void VisitCompareZero(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
X64OperandGenerator g(selector);
VisitCompare(selector, opcode, g.Use(node), g.TempImmediate(0), cont);
}
// Shared routine for multiple float64 compare operations (inputs commuted).
-static void VisitFloat64Compare(InstructionSelector* selector, Node* node,
- FlagsContinuation* cont) {
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
Node* const left = node->InputAt(0);
Node* const right = node->InputAt(1);
VisitCompare(selector, kSSEFloat64Cmp, right, left, cont, false);
}
+} // namespace
+
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
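
VisitWord64Compare above special-cases the JavaScript stack check: a comparison whose operands are a Load of the isolate's stack-limit external reference and LoadStackPointer collapses into one kX64StackCheck instruction (a cmp of rsp against the stack-limit root). A stand-alone sketch of the shape being matched, on a toy IR; the types and names are illustrative only:

    #include <cstdio>

    enum Op { kLoad, kLoadStackPointer, kStackLimitAddress, kOther };

    struct IrNode {
      Op op;
      IrNode* input;  // a single input is enough for this sketch
    };

    // Matches Compare(Load(stack_limit_address), LoadStackPointer), the
    // shape VisitWord64Compare folds into a single kX64StackCheck.
    bool IsStackCheck(const IrNode* left, const IrNode* right) {
      return left->op == kLoad && left->input != nullptr &&
             left->input->op == kStackLimitAddress &&
             right->op == kLoadStackPointer;
    }

    int main() {
      IrNode limit{kStackLimitAddress, nullptr};
      IrNode load{kLoad, &limit};
      IrNode sp{kLoadStackPointer, nullptr};
      std::printf("%d\n", IsStackCheck(&load, &sp));  // 1
      std::printf("%d\n", IsStackCheck(&sp, &load));  // 0
    }
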
@@ -1055,25 +1127,12 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
FlagsContinuation cont(kNotEqual, tbranch, fbranch);
// Try to combine with comparisons against 0 by simply inverting the branch.
- while (CanCover(user, value)) {
- if (value->opcode() == IrOpcode::kWord32Equal) {
- Int32BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
- } else if (value->opcode() == IrOpcode::kWord64Equal) {
- Int64BinopMatcher m(value);
- if (m.right().Is(0)) {
- user = value;
- value = m.left().node();
- cont.Negate();
- } else {
- break;
- }
+ while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (m.right().Is(0)) {
+ user = value;
+ value = m.left().node();
+ cont.Negate();
} else {
break;
}
@@ -1099,16 +1158,16 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
return VisitWordCompare(this, value, kX64Cmp32, &cont);
case IrOpcode::kWord64Equal:
cont.OverwriteAndNegateIfEqual(kEqual);
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kInt64LessThan:
cont.OverwriteAndNegateIfEqual(kSignedLessThan);
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kInt64LessThanOrEqual:
cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kUint64LessThan:
cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kFloat64Equal:
cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
return VisitFloat64Compare(this, value, &cont);
@@ -1146,7 +1205,7 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
case IrOpcode::kInt32Sub:
return VisitWordCompare(this, value, kX64Cmp32, &cont);
case IrOpcode::kInt64Sub:
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kWord32And:
return VisitWordCompare(this, value, kX64Test32, &cont);
case IrOpcode::kWord64And:
@@ -1161,67 +1220,34 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
}
-void InstructionSelector::VisitSwitch(Node* node, BasicBlock* default_branch,
- BasicBlock** case_branches,
- int32_t* case_values, size_t case_count,
- int32_t min_value, int32_t max_value) {
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
X64OperandGenerator g(this);
InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
- InstructionOperand default_operand = g.Label(default_branch);
-
- // Note that {value_range} can be 0 if {min_value} is -2^31 and {max_value}
- // is 2^31-1, so don't assume that it's non-zero below.
- size_t value_range =
- 1u + bit_cast<uint32_t>(max_value) - bit_cast<uint32_t>(min_value);
- // Determine whether to issue an ArchTableSwitch or an ArchLookupSwitch
- // instruction.
- size_t table_space_cost = 4 + value_range;
+ // Emit either ArchTableSwitch or ArchLookupSwitch.
+ size_t table_space_cost = 4 + sw.value_range;
size_t table_time_cost = 3;
- size_t lookup_space_cost = 3 + 2 * case_count;
- size_t lookup_time_cost = case_count;
- if (case_count > 4 &&
+ size_t lookup_space_cost = 3 + 2 * sw.case_count;
+ size_t lookup_time_cost = sw.case_count;
+ if (sw.case_count > 4 &&
table_space_cost + 3 * table_time_cost <=
lookup_space_cost + 3 * lookup_time_cost &&
- min_value > std::numeric_limits<int32_t>::min()) {
+ sw.min_value > std::numeric_limits<int32_t>::min()) {
InstructionOperand index_operand = g.TempRegister();
- if (min_value) {
+ if (sw.min_value) {
// The leal automatically zero extends, so result is a valid 64-bit index.
Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI), index_operand,
- value_operand, g.TempImmediate(-min_value));
+ value_operand, g.TempImmediate(-sw.min_value));
} else {
// Zero extend, because we use it as 64-bit index into the jump table.
Emit(kX64Movl, index_operand, value_operand);
}
- size_t input_count = 2 + value_range;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = index_operand;
- std::fill(&inputs[1], &inputs[input_count], default_operand);
- for (size_t index = 0; index < case_count; ++index) {
- size_t value = case_values[index] - min_value;
- BasicBlock* branch = case_branches[index];
- DCHECK_LE(0u, value);
- DCHECK_LT(value + 2, input_count);
- inputs[value + 2] = g.Label(branch);
- }
- Emit(kArchTableSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
- return;
+    // Generate a table switch.
+ return EmitTableSwitch(sw, index_operand);
}
// Generate a sequence of conditional jumps.
- size_t input_count = 2 + case_count * 2;
- auto* inputs = zone()->NewArray<InstructionOperand>(input_count);
- inputs[0] = value_operand;
- inputs[1] = default_operand;
- for (size_t index = 0; index < case_count; ++index) {
- int32_t value = case_values[index];
- BasicBlock* branch = case_branches[index];
- inputs[index * 2 + 2 + 0] = g.TempImmediate(value);
- inputs[index * 2 + 2 + 1] = g.Label(branch);
- }
- Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr)
- ->MarkAsControl();
+ return EmitLookupSwitch(sw, value_operand);
}
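
The table-versus-lookup decision is unchanged by the SwitchInfo refactoring: a jump table costs roughly 4 + value_range units of space and 3 of time, a compare chain costs 3 + 2 * case_count and case_count, and space is weighed against three times the time cost. A stand-alone copy of the heuristic with two worked examples:

    #include <cstddef>
    #include <cstdio>

    // Mirrors the cost model in VisitSwitch: prefer a jump table when
    // there are more than four cases and the table's combined cost
    // (space + 3 * time) is no worse than the compare chain's.
    bool UseJumpTable(size_t case_count, size_t value_range) {
      size_t table_space_cost = 4 + value_range;
      size_t table_time_cost = 3;
      size_t lookup_space_cost = 3 + 2 * case_count;
      size_t lookup_time_cost = case_count;
      return case_count > 4 &&
             table_space_cost + 3 * table_time_cost <=
                 lookup_space_cost + 3 * lookup_time_cost;
    }

    int main() {
      // Dense five-case switch: table 9 + 9 = 18 vs lookup 13 + 15 = 28.
      std::printf("%d\n", UseJumpTable(5, 5));    // 1: jump table wins
      // Five cases spread over a range of 100: table 104 + 9 = 113.
      std::printf("%d\n", UseJumpTable(5, 100));  // 0: compare chain wins
    }
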
@@ -1308,7 +1334,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
if (CanCover(user, value)) {
switch (value->opcode()) {
case IrOpcode::kInt64Sub:
- return VisitWordCompare(this, value, kX64Cmp, &cont);
+ return VisitWord64Compare(this, value, &cont);
case IrOpcode::kWord64And:
return VisitWordCompare(this, value, kX64Test, &cont);
default:
@@ -1317,7 +1343,7 @@ void InstructionSelector::VisitWord64Equal(Node* const node) {
}
return VisitCompareZero(this, value, kX64Cmp, &cont);
}
- VisitWordCompare(this, node, kX64Cmp, &cont);
+ VisitWord64Compare(this, node, &cont);
}
@@ -1343,19 +1369,19 @@ void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
void InstructionSelector::VisitInt64LessThan(Node* node) {
FlagsContinuation cont(kSignedLessThan, node);
- VisitWordCompare(this, node, kX64Cmp, &cont);
+ VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
FlagsContinuation cont(kSignedLessThanOrEqual, node);
- VisitWordCompare(this, node, kX64Cmp, &cont);
+ VisitWord64Compare(this, node, &cont);
}
void InstructionSelector::VisitUint64LessThan(Node* node) {
FlagsContinuation cont(kUnsignedLessThan, node);
- VisitWordCompare(this, node, kX64Cmp, &cont);
+ VisitWord64Compare(this, node, &cont);
}
@@ -1377,16 +1403,55 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
}
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ X64OperandGenerator g(this);
+ Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
+ g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ X64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Float64Matcher mleft(left);
+ if (mleft.HasValue() && (bit_cast<uint64_t>(mleft.Value()) >> 32) == 0u) {
+ Emit(kSSEFloat64LoadLowWord32, g.DefineAsRegister(node), g.Use(right));
+ return;
+ }
+ Emit(kSSEFloat64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.Use(right));
+}
+
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ X64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kSSEFloat64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.Use(right));
+}
+
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kFloat64Max |
+ MachineOperatorBuilder::kFloat64Min |
+ MachineOperatorBuilder::kWord32ShiftIsSafe;
if (CpuFeatures::IsSupported(SSE4_1)) {
- return MachineOperatorBuilder::kFloat64Floor |
- MachineOperatorBuilder::kFloat64Ceil |
- MachineOperatorBuilder::kFloat64RoundTruncate |
- MachineOperatorBuilder::kWord32ShiftIsSafe;
+ flags |= MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat64RoundTruncate;
}
- return MachineOperatorBuilder::kNoFlags;
+ return flags;
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/x64/linkage-x64.cc b/deps/v8/src/compiler/x64/linkage-x64.cc
index 802edf3554..1b840a995d 100644
--- a/deps/v8/src/compiler/x64/linkage-x64.cc
+++ b/deps/v8/src/compiler/x64/linkage-x64.cc
@@ -65,9 +65,10 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
CallDescriptor* Linkage::GetStubCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count, CallDescriptor::Flags flags,
- Operator::Properties properties) {
+ Operator::Properties properties, MachineType return_type) {
return LH::GetStubCallDescriptor(isolate, zone, descriptor,
- stack_parameter_count, flags, properties);
+ stack_parameter_count, flags, properties,
+ return_type);
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 6537e2c3e2..6bd6764546 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -122,8 +122,8 @@ static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) {
Isolate* isolate = it->isolate();
Maybe<PropertyAttributes> attrs = JSReceiver::GetPropertyAttributes(it);
- DCHECK(attrs.has_value || isolate->has_pending_exception());
- if (!attrs.has_value || attrs.value == ABSENT) return attrs;
+ DCHECK(attrs.IsJust() || isolate->has_pending_exception());
+ if (!attrs.IsJust() || attrs.FromJust() == ABSENT) return attrs;
Handle<Symbol> unscopables_symbol = isolate->factory()->unscopables_symbol();
Handle<Object> receiver = it->GetReceiver();
@@ -131,7 +131,7 @@ static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) {
MaybeHandle<Object> maybe_unscopables =
Object::GetProperty(receiver, unscopables_symbol);
if (!maybe_unscopables.ToHandle(&unscopables)) {
- return Maybe<PropertyAttributes>();
+ return Nothing<PropertyAttributes>();
}
if (!unscopables->IsSpecObject()) return attrs;
Handle<Object> blacklist;
@@ -139,10 +139,9 @@ static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) {
Object::GetProperty(unscopables, it->name());
if (!maybe_blacklist.ToHandle(&blacklist)) {
DCHECK(isolate->has_pending_exception());
- return Maybe<PropertyAttributes>();
+ return Nothing<PropertyAttributes>();
}
- if (!blacklist->IsUndefined()) return maybe(ABSENT);
- return attrs;
+ return blacklist->IsUndefined() ? attrs : Just(ABSENT);
}
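
These hunks are part of the mechanical migration from the old Maybe fields (has_value/value and the maybe() helper) to the Just()/Nothing()/IsJust()/FromJust() surface of v8::Maybe. A minimal local stand-in that matches the call sites above, so the new shape can be tried outside of V8:

    #include <cassert>

    // Local stand-in for v8::Maybe with the post-migration surface used
    // in UnscopableLookup and Context::Lookup above.
    template <typename T>
    class Maybe {
     public:
      Maybe() : has_value_(false), value_() {}
      explicit Maybe(T value) : has_value_(true), value_(value) {}
      bool IsJust() const { return has_value_; }
      T FromJust() const { assert(has_value_); return value_; }
     private:
      bool has_value_;
      T value_;
    };

    template <typename T>
    Maybe<T> Just(T value) { return Maybe<T>(value); }

    template <typename T>
    Maybe<T> Nothing() { return Maybe<T>(); }

    enum PropertyAttributes { NONE, ABSENT };  // illustrative subset

    Maybe<PropertyAttributes> Lookup(bool found, bool failed) {
      if (failed) return Nothing<PropertyAttributes>();  // exception pending
      return Just(found ? NONE : ABSENT);
    }

    int main() {
      Maybe<PropertyAttributes> attrs = Lookup(true, false);
      assert(attrs.IsJust() && attrs.FromJust() != ABSENT);
      assert(!Lookup(true, true).IsJust());  // Nothing(): no usable value
    }
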
static void GetAttributesAndBindingFlags(VariableMode mode,
@@ -173,6 +172,10 @@ static void GetAttributesAndBindingFlags(VariableMode mode,
? IMMUTABLE_CHECK_INITIALIZED_HARMONY
: IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
+ case IMPORT:
+ // TODO(ES6)
+ UNREACHABLE();
+ break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
@@ -250,7 +253,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
// Context extension objects needs to behave as if they have no
// prototype. So even if we want to follow prototype chains, we need
// to only do a local lookup for context extension objects.
- Maybe<PropertyAttributes> maybe;
+ Maybe<PropertyAttributes> maybe = Nothing<PropertyAttributes>();
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
object->IsJSContextExtensionObject()) {
maybe = JSReceiver::GetOwnPropertyAttributes(object, name);
@@ -261,11 +264,11 @@ Handle<Object> Context::Lookup(Handle<String> name,
maybe = JSReceiver::GetPropertyAttributes(object, name);
}
- if (!maybe.has_value) return Handle<Object>();
+ if (!maybe.IsJust()) return Handle<Object>();
DCHECK(!isolate->has_pending_exception());
- *attributes = maybe.value;
+ *attributes = maybe.FromJust();
- if (maybe.value != ABSENT) {
+ if (maybe.FromJust() != ABSENT) {
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
reinterpret_cast<void*>(*object));
@@ -276,7 +279,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
// 2. Check the context proper if it has slots.
if (context->IsFunctionContext() || context->IsBlockContext() ||
- (FLAG_harmony_scoping && context->IsScriptContext())) {
+ context->IsScriptContext()) {
// Use serialized scope information of functions and blocks to search
// for the context index.
Handle<ScopeInfo> scope_info;
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 3d34e0ec77..3b4b7992c0 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -125,10 +125,12 @@ enum BindingFlags {
V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
sloppy_function_with_readonly_prototype_map) \
V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
+ V(STRONG_FUNCTION_MAP_INDEX, Map, strong_function_map) \
V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
sloppy_function_without_prototype_map) \
V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_function_without_prototype_map) \
+ V(STRONG_CONSTRUCTOR_MAP_INDEX, Map, strong_constructor_map) \
V(BOUND_FUNCTION_MAP_INDEX, Map, bound_function_map) \
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map) \
V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map) \
@@ -153,7 +155,7 @@ enum BindingFlags {
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
- V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_STATUS_INDEX, Symbol, promise_status) \
V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
@@ -176,6 +178,7 @@ enum BindingFlags {
native_object_notifier_perform_change) \
V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \
+ V(STRONG_GENERATOR_FUNCTION_MAP_INDEX, Map, strong_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
@@ -319,8 +322,10 @@ class Context: public FixedArray {
SLOPPY_FUNCTION_MAP_INDEX,
SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX,
STRICT_FUNCTION_MAP_INDEX,
+ STRONG_FUNCTION_MAP_INDEX,
SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
+ STRONG_CONSTRUCTOR_MAP_INDEX,
BOUND_FUNCTION_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
INITIAL_ARRAY_PROTOTYPE_INDEX,
@@ -385,7 +390,7 @@ class Context: public FixedArray {
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
RUN_MICROTASKS_INDEX,
ENQUEUE_MICROTASK_INDEX,
- IS_PROMISE_INDEX,
+ PROMISE_STATUS_INDEX,
PROMISE_CREATE_INDEX,
PROMISE_RESOLVE_INDEX,
PROMISE_REJECT_INDEX,
@@ -406,6 +411,7 @@ class Context: public FixedArray {
NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE,
SLOPPY_GENERATOR_FUNCTION_MAP_INDEX,
STRICT_GENERATOR_FUNCTION_MAP_INDEX,
+ STRONG_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
ITERATOR_RESULT_MAP_INDEX,
MAP_ITERATOR_MAP_INDEX,
@@ -570,18 +576,27 @@ class Context: public FixedArray {
static int FunctionMapIndex(LanguageMode language_mode, FunctionKind kind) {
if (IsGeneratorFunction(kind)) {
- return is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
+ return is_strong(language_mode) ? STRONG_GENERATOR_FUNCTION_MAP_INDEX :
+ is_strict(language_mode) ? STRICT_GENERATOR_FUNCTION_MAP_INDEX
: SLOPPY_GENERATOR_FUNCTION_MAP_INDEX;
}
+ if (IsConstructor(kind)) {
+ return is_strong(language_mode) ? STRONG_CONSTRUCTOR_MAP_INDEX :
+ is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
+ : SLOPPY_FUNCTION_MAP_INDEX;
+ }
+
if (IsArrowFunction(kind) || IsConciseMethod(kind) ||
IsAccessorFunction(kind)) {
- return is_strict(language_mode)
- ? STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX
- : SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
+ return is_strong(language_mode) ? STRONG_FUNCTION_MAP_INDEX :
+ is_strict(language_mode) ?
+ STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX :
+ SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
}
- return is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
+ return is_strong(language_mode) ? STRONG_FUNCTION_MAP_INDEX :
+ is_strict(language_mode) ? STRICT_FUNCTION_MAP_INDEX
: SLOPPY_FUNCTION_MAP_INDEX;
}
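
The selection above always tests strong mode first, falls back to strict, and defaults to sloppy. A minimal standalone sketch of that three-way selection, using placeholder enums and helpers rather than V8's real LanguageMode and context map indices:

    #include <cassert>

    // Stand-ins for V8's LanguageMode and context map indices.
    enum LanguageMode { SLOPPY, STRICT, STRONG };
    enum MapIndex { SLOPPY_FUNCTION_MAP, STRICT_FUNCTION_MAP, STRONG_FUNCTION_MAP };

    // Strong mode implies strict mode, as in V8's language-mode lattice.
    static bool is_strict(LanguageMode m) { return m != SLOPPY; }
    static bool is_strong(LanguageMode m) { return m == STRONG; }

    static MapIndex FunctionMapIndexSketch(LanguageMode mode) {
      return is_strong(mode) ? STRONG_FUNCTION_MAP
                             : is_strict(mode) ? STRICT_FUNCTION_MAP
                                               : SLOPPY_FUNCTION_MAP;
    }

    int main() {
      assert(FunctionMapIndexSketch(STRONG) == STRONG_FUNCTION_MAP);
      assert(FunctionMapIndexSketch(STRICT) == STRICT_FUNCTION_MAP);
      assert(FunctionMapIndexSketch(SLOPPY) == SLOPPY_FUNCTION_MAP);
    }
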
diff --git a/deps/v8/src/conversions.cc b/deps/v8/src/conversions.cc
index 663f4e8dec..82a521bd90 100644
--- a/deps/v8/src/conversions.cc
+++ b/deps/v8/src/conversions.cc
@@ -9,6 +9,7 @@
#include "src/v8.h"
#include "src/assert-scope.h"
+#include "src/char-predicates-inl.h"
#include "src/conversions-inl.h"
#include "src/conversions.h"
#include "src/dtoa.h"
@@ -502,4 +503,54 @@ double StringToDouble(UnicodeCache* unicode_cache, Handle<String> string,
}
+bool IsNonArrayIndexInteger(String* string) {
+ const int kBufferSize = 64;
+ const int kUint32MaxChars = 11;
+ uint16_t buffer[kBufferSize];
+ int offset = 0;
+ const int length = string->length();
+ if (length == 0) return false;
+ // First iteration, check for minus, 0 followed by anything else, etc.
+ int to = std::min(offset + kUint32MaxChars, length);
+ {
+ String::WriteToFlat(string, buffer, offset, to);
+ bool negative = false;
+ if (buffer[offset] == '-') {
+ negative = true;
+ ++offset;
+ if (offset == to) return false; // Just '-' is bad.
+ }
+ if (buffer[offset] == '0') {
+ return to == 2 && negative; // Match just '-0'.
+ }
+ // Process positive integers.
+ if (!negative) {
+ uint64_t acc = 0;
+ for (; offset < to; ++offset) {
+ uint64_t digit = buffer[offset] - '0';
+ if (digit > 9) return false;
+ acc = 10 * acc + digit;
+ }
+ // String is consumed. Evaluate what we have.
+ if (offset == length) {
+ return acc >
+ static_cast<uint64_t>(std::numeric_limits<uint32_t>::max());
+ }
+ }
+ }
+ // Consume rest of string. If we get here, we're way out of uint32_t bounds
+ // or negative.
+ int i = offset;
+ while (true) {
+ for (; offset < to; ++offset, ++i) {
+ if (!IsDecimalDigit(buffer[i])) return false;
+ }
+ if (offset == length) break;
+ // Read next chunk.
+ to = std::min(offset + kBufferSize, length);
+ String::WriteToFlat(string, buffer, offset, to);
+ i = 0;
+ }
+ return true;
+}
} } // namespace v8::internal
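
IsNonArrayIndexInteger returns true only for strings that spell a decimal integer which can never be a valid array index: any negative integer, or a positive one above the uint32 maximum. A rough standalone sketch of the same decision over std::string, without the chunked WriteToFlat buffering (assumption: flat input, so this is an approximation of the code above, not a drop-in):

    #include <cstdint>
    #include <limits>
    #include <string>

    bool IsNonArrayIndexIntegerSketch(const std::string& s) {
      size_t i = 0;
      if (s.empty()) return false;
      bool negative = s[0] == '-';
      if (negative && ++i == s.size()) return false;      // Just '-' is bad.
      if (s[i] == '0') return s.size() == 2 && negative;  // Match just '-0'.
      uint64_t acc = 0;
      bool out_of_range = false;
      for (; i < s.size(); ++i) {
        if (s[i] < '0' || s[i] > '9') return false;  // Not an integer at all.
        if (!out_of_range) {
          acc = 10 * acc + static_cast<uint64_t>(s[i] - '0');
          // Once past uint32 max the exact value no longer matters.
          if (acc > std::numeric_limits<uint32_t>::max()) out_of_range = true;
        }
      }
      return negative || out_of_range;
    }
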
diff --git a/deps/v8/src/conversions.h b/deps/v8/src/conversions.h
index 5afd4e1f2c..1609395f81 100644
--- a/deps/v8/src/conversions.h
+++ b/deps/v8/src/conversions.h
@@ -236,6 +236,8 @@ inline size_t NumberToSize(Isolate* isolate,
return result;
}
+
+bool IsNonArrayIndexInteger(String* string);
} } // namespace v8::internal
#endif // V8_CONVERSIONS_H_
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 539b182915..af8e8b4553 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -396,44 +396,29 @@ class AggregatedHistogramTimerScope {
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
-#define HISTOGRAM_PERCENTAGE_LIST(HP) \
- /* Heap fragmentation. */ \
- HP(external_fragmentation_total, \
- V8.MemoryExternalFragmentationTotal) \
- HP(external_fragmentation_old_pointer_space, \
- V8.MemoryExternalFragmentationOldPointerSpace) \
- HP(external_fragmentation_old_data_space, \
- V8.MemoryExternalFragmentationOldDataSpace) \
- HP(external_fragmentation_code_space, \
- V8.MemoryExternalFragmentationCodeSpace) \
- HP(external_fragmentation_map_space, \
- V8.MemoryExternalFragmentationMapSpace) \
- HP(external_fragmentation_cell_space, \
- V8.MemoryExternalFragmentationCellSpace) \
- HP(external_fragmentation_property_cell_space, \
- V8.MemoryExternalFragmentationPropertyCellSpace) \
- HP(external_fragmentation_lo_space, \
- V8.MemoryExternalFragmentationLoSpace) \
- /* Percentages of heap committed to each space. */ \
- HP(heap_fraction_new_space, \
- V8.MemoryHeapFractionNewSpace) \
- HP(heap_fraction_old_pointer_space, \
- V8.MemoryHeapFractionOldPointerSpace) \
- HP(heap_fraction_old_data_space, \
- V8.MemoryHeapFractionOldDataSpace) \
- HP(heap_fraction_code_space, \
- V8.MemoryHeapFractionCodeSpace) \
- HP(heap_fraction_map_space, \
- V8.MemoryHeapFractionMapSpace) \
- HP(heap_fraction_cell_space, \
- V8.MemoryHeapFractionCellSpace) \
- HP(heap_fraction_property_cell_space, \
- V8.MemoryHeapFractionPropertyCellSpace) \
- HP(heap_fraction_lo_space, \
- V8.MemoryHeapFractionLoSpace) \
- /* Percentage of crankshafted codegen. */ \
- HP(codegen_fraction_crankshaft, \
- V8.CodegenFractionCrankshaft) \
+#define HISTOGRAM_PERCENTAGE_LIST(HP) \
+ /* Heap fragmentation. */ \
+ HP(external_fragmentation_total, V8.MemoryExternalFragmentationTotal) \
+ HP(external_fragmentation_old_pointer_space, \
+ V8.MemoryExternalFragmentationOldPointerSpace) \
+ HP(external_fragmentation_old_data_space, \
+ V8.MemoryExternalFragmentationOldDataSpace) \
+ HP(external_fragmentation_code_space, \
+ V8.MemoryExternalFragmentationCodeSpace) \
+ HP(external_fragmentation_map_space, V8.MemoryExternalFragmentationMapSpace) \
+ HP(external_fragmentation_cell_space, \
+ V8.MemoryExternalFragmentationCellSpace) \
+ HP(external_fragmentation_lo_space, V8.MemoryExternalFragmentationLoSpace) \
+ /* Percentages of heap committed to each space. */ \
+ HP(heap_fraction_new_space, V8.MemoryHeapFractionNewSpace) \
+ HP(heap_fraction_old_pointer_space, V8.MemoryHeapFractionOldPointerSpace) \
+ HP(heap_fraction_old_data_space, V8.MemoryHeapFractionOldDataSpace) \
+ HP(heap_fraction_code_space, V8.MemoryHeapFractionCodeSpace) \
+ HP(heap_fraction_map_space, V8.MemoryHeapFractionMapSpace) \
+ HP(heap_fraction_cell_space, V8.MemoryHeapFractionCellSpace) \
+ HP(heap_fraction_lo_space, V8.MemoryHeapFractionLoSpace) \
+ /* Percentage of crankshafted codegen. */ \
+ HP(codegen_fraction_crankshaft, V8.CodegenFractionCrankshaft)
#define HISTOGRAM_MEMORY_LIST(HM) \
@@ -443,8 +428,6 @@ class AggregatedHistogramTimerScope {
V8.MemoryHeapSampleMapSpaceCommitted) \
HM(heap_sample_cell_space_committed, \
V8.MemoryHeapSampleCellSpaceCommitted) \
- HM(heap_sample_property_cell_space_committed, \
- V8.MemoryHeapSamplePropertyCellSpaceCommitted) \
HM(heap_sample_code_space_committed, \
V8.MemoryHeapSampleCodeSpaceCommitted) \
HM(heap_sample_maximum_committed, \
@@ -587,6 +570,7 @@ class AggregatedHistogramTimerScope {
SC(math_asin, V8.MathAsin) \
SC(math_atan, V8.MathAtan) \
SC(math_atan2, V8.MathAtan2) \
+ SC(math_clz32, V8.MathClz32) \
SC(math_exp, V8.MathExp) \
SC(math_floor, V8.MathFloor) \
SC(math_log, V8.MathLog) \
@@ -623,11 +607,6 @@ class AggregatedHistogramTimerScope {
SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable) \
SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted) \
SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed) \
- SC(property_cell_space_bytes_available, \
- V8.MemoryPropertyCellSpaceBytesAvailable) \
- SC(property_cell_space_bytes_committed, \
- V8.MemoryPropertyCellSpaceBytesCommitted) \
- SC(property_cell_space_bytes_used, V8.MemoryPropertyCellSpaceBytesUsed) \
SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable) \
SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted) \
SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
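
The HP, HM, and SC lists in this file are X-macros: the same list is expanded repeatedly with different definitions of the parameter macro, once to declare counters, once to register names, and so on. A minimal sketch of the pattern with a hypothetical two-entry list (captions as plain strings here, unlike V8's token-based captions):

    #include <cstdio>

    #define HISTOGRAM_LIST(HP)                                        \
      HP(heap_fraction_new_space, "V8.MemoryHeapFractionNewSpace")    \
      HP(codegen_fraction_crankshaft, "V8.CodegenFractionCrankshaft")

    // One expansion declares a variable per entry...
    #define DECLARE_COUNTER(name, caption) int name = 0;
    HISTOGRAM_LIST(DECLARE_COUNTER)
    #undef DECLARE_COUNTER

    int main() {
      // ...another expansion prints each entry's caption and value.
    #define PRINT_COUNTER(name, caption) std::printf("%s = %d\n", caption, name);
      HISTOGRAM_LIST(PRINT_COUNTER)
    #undef PRINT_COUNTER
    }
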
diff --git a/deps/v8/src/cpu-profiler-inl.h b/deps/v8/src/cpu-profiler-inl.h
index 0320ed5a79..075f285489 100644
--- a/deps/v8/src/cpu-profiler-inl.h
+++ b/deps/v8/src/cpu-profiler-inl.h
@@ -17,9 +17,6 @@ namespace internal {
void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddCode(start, entry, size);
- if (shared != NULL) {
- entry->set_shared_id(code_map->GetSharedId(shared));
- }
}
@@ -38,12 +35,7 @@ void CodeDisableOptEventRecord::UpdateCodeMap(CodeMap* code_map) {
void CodeDeoptEventRecord::UpdateCodeMap(CodeMap* code_map) {
CodeEntry* entry = code_map->FindEntry(start);
- if (entry != NULL) entry->set_deopt_info(deopt_reason, raw_position);
-}
-
-
-void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
- code_map->MoveCode(from, to);
+ if (entry != NULL) entry->set_deopt_info(deopt_reason, position, pc_offset);
}
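
Each profiler event record carries its payload and applies itself to the shared CodeMap through UpdateCodeMap; dropping the shared-id bookkeeping above shrinks CodeCreateEventRecord to just start, entry, and size. A loose sketch of that self-applying record shape (the CodeMap here is a stand-in, not V8's):

    #include <map>

    using Address = const void*;

    // Stand-in for V8's CodeMap: start address -> code size.
    struct CodeMap {
      std::map<Address, unsigned> entries;
      void AddCode(Address start, unsigned size) { entries[start] = size; }
    };

    // The record knows how to apply itself, mirroring UpdateCodeMap above.
    struct CodeCreateEventRecord {
      Address start;
      unsigned size;
      void UpdateCodeMap(CodeMap* code_map) { code_map->AddCode(start, size); }
    };

    int main() {
      CodeMap map;
      int marker = 0;
      CodeCreateEventRecord rec{&marker, 64};
      rec.UpdateCodeMap(&map);  // The processor thread drains records like this.
    }
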
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index c276bb6d60..03d70521ae 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -201,7 +201,6 @@ void CpuProfiler::CallbackEvent(Name* name, Address entry_point) {
Logger::CALLBACK_TAG,
profiles_->GetName(name));
rec->size = 1;
- rec->shared = NULL;
processor_->Enqueue(evt_rec);
}
@@ -218,7 +217,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
rec->size = code->ExecutableSize();
- rec->shared = NULL;
processor_->Enqueue(evt_rec);
}
@@ -235,7 +233,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
rec->size = code->ExecutableSize();
- rec->shared = NULL;
processor_->Enqueue(evt_rec);
}
@@ -254,16 +251,10 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
NULL, code->instruction_start());
if (info) {
rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
+ rec->entry->set_inlined_function_infos(info->inlined_function_infos());
}
- if (shared->script()->IsScript()) {
- DCHECK(Script::cast(shared->script()));
- Script* script = Script::cast(shared->script());
- rec->entry->set_script_id(script->id()->value());
- rec->entry->set_bailout_reason(
- GetBailoutReason(shared->disable_optimization_reason()));
- }
+ rec->entry->FillFunctionInfo(shared);
rec->size = code->ExecutableSize();
- rec->shared = shared->address();
processor_->Enqueue(evt_rec);
}
@@ -298,12 +289,10 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
column, line_table, code->instruction_start());
if (info) {
rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
+ rec->entry->set_inlined_function_infos(info->inlined_function_infos());
}
- rec->entry->set_script_id(script->id()->value());
+ rec->entry->FillFunctionInfo(shared);
rec->size = code->ExecutableSize();
- rec->shared = shared->address();
- rec->entry->set_bailout_reason(
- GetBailoutReason(shared->disable_optimization_reason()));
processor_->Enqueue(evt_rec);
}
@@ -320,7 +309,6 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
CodeEntry::kEmptyResourceName, CpuProfileNode::kNoLineNumberInfo,
CpuProfileNode::kNoColumnNumberInfo, NULL, code->instruction_start());
rec->size = code->ExecutableSize();
- rec->shared = NULL;
processor_->Enqueue(evt_rec);
}
@@ -343,14 +331,14 @@ void CpuProfiler::CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
}
-void CpuProfiler::CodeDeoptEvent(Code* code, int bailout_id, Address pc,
- int fp_to_sp_delta) {
+void CpuProfiler::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
CodeEventsContainer evt_rec(CodeEventRecord::CODE_DEOPT);
CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
- Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, bailout_id);
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(code, pc);
rec->start = code->address();
rec->deopt_reason = Deoptimizer::GetDeoptReason(info.deopt_reason);
- rec->raw_position = info.raw_position;
+ rec->position = info.position;
+ rec->pc_offset = pc - code->instruction_start();
processor_->Enqueue(evt_rec);
processor_->AddDeoptStack(isolate_, pc, fp_to_sp_delta);
}
@@ -360,16 +348,6 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
}
-void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
- CodeEventsContainer evt_rec(CodeEventRecord::SHARED_FUNC_MOVE);
- SharedFunctionInfoMoveEventRecord* rec =
- &evt_rec.SharedFunctionInfoMoveEventRecord_;
- rec->from = from;
- rec->to = to;
- processor_->Enqueue(evt_rec);
-}
-
-
void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
if (FilterOutCodeCreateEvent(Logger::CALLBACK_TAG)) return;
CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
@@ -380,7 +358,6 @@ void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) {
profiles_->GetName(name),
"get ");
rec->size = 1;
- rec->shared = NULL;
processor_->Enqueue(evt_rec);
}
@@ -409,7 +386,6 @@ void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) {
profiles_->GetName(name),
"set ");
rec->size = 1;
- rec->shared = NULL;
processor_->Enqueue(evt_rec);
}
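
CodeDeoptEvent now resolves the deopt reason from the return pc rather than a bailout id, and records where inside the code object the deopt happened as a byte offset. A small sketch of that offset computation with stand-in types (only the pointer arithmetic is taken from the diff):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    using Address = const uint8_t*;

    // Stand-in for the relevant slice of a Code object.
    struct Code {
      Address start;
      Address instruction_start() const { return start; }
    };

    int main() {
      uint8_t instructions[128] = {};
      Code code{instructions};
      Address pc = instructions + 42;  // Hypothetical deopt return address.
      // Same computation as rec->pc_offset above.
      size_t pc_offset = static_cast<size_t>(pc - code.instruction_start());
      assert(pc_offset == 42);
    }
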
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 140de3b906..26ec7f900a 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -9,6 +9,7 @@
#include "src/base/atomicops.h"
#include "src/base/platform/time.h"
#include "src/circular-queue.h"
+#include "src/compiler.h"
#include "src/sampler.h"
#include "src/unbound-queue.h"
@@ -28,7 +29,6 @@ class ProfileGenerator;
V(CODE_MOVE, CodeMoveEventRecord) \
V(CODE_DISABLE_OPT, CodeDisableOptEventRecord) \
V(CODE_DEOPT, CodeDeoptEventRecord) \
- V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord) \
V(REPORT_BUILTIN, ReportBuiltinEventRecord)
@@ -52,7 +52,6 @@ class CodeCreateEventRecord : public CodeEventRecord {
Address start;
CodeEntry* entry;
unsigned size;
- Address shared;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -80,16 +79,8 @@ class CodeDeoptEventRecord : public CodeEventRecord {
public:
Address start;
const char* deopt_reason;
- int raw_position;
-
- INLINE(void UpdateCodeMap(CodeMap* code_map));
-};
-
-
-class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
- public:
- Address from;
- Address to;
+ SourcePosition position;
+ size_t pc_offset;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@@ -245,13 +236,12 @@ class CpuProfiler : public CodeEventListener {
virtual void CodeMovingGCEvent() {}
virtual void CodeMoveEvent(Address from, Address to);
virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared);
- virtual void CodeDeoptEvent(Code* code, int bailout_id, Address pc,
- int fp_to_sp_delta);
+ virtual void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
virtual void CodeDeleteEvent(Address from);
virtual void GetterCallbackEvent(Name* name, Address entry_point);
virtual void RegExpCodeCreateEvent(Code* code, String* source);
virtual void SetterCallbackEvent(Name* name, Address entry_point);
- virtual void SharedFunctionInfoMoveEvent(Address from, Address to);
+ virtual void SharedFunctionInfoMoveEvent(Address from, Address to) {}
INLINE(bool is_profiling() const) { return is_profiling_; }
bool* is_profiling_address() {
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 7fa6f8c42e..38a4b8ec1a 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -45,7 +45,7 @@
#include "src/basic-block-profiler.h"
#include "src/d8-debug.h"
#include "src/debug.h"
-#include "src/natives.h"
+#include "src/snapshot/natives.h"
#include "src/v8.h"
#endif // !V8_SHARED
@@ -72,6 +72,10 @@
namespace v8 {
+namespace {
+v8::Platform* g_platform = NULL;
+} // namespace
+
static Handle<Value> Throw(Isolate* isolate, const char* message) {
return isolate->ThrowException(String::NewFromUtf8(isolate, message));
@@ -963,15 +967,9 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
void Shell::Initialize(Isolate* isolate) {
#ifndef V8_SHARED
- Shell::counter_map_ = new CounterMap();
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(isolate, i::FLAG_map_counters);
- if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
- isolate->SetCounterFunction(LookupCounter);
- isolate->SetCreateHistogramFunction(CreateHistogram);
- isolate->SetAddHistogramSampleFunction(AddHistogramSample);
- }
#endif // !V8_SHARED
}
@@ -1295,9 +1293,11 @@ void SourceGroup::ExecuteInThread() {
}
}
if (Shell::options.send_idle_notification) {
- const int kLongIdlePauseInMs = 1000;
+ const double kLongIdlePauseInSeconds = 1.0;
isolate->ContextDisposedNotification();
- isolate->IdleNotification(kLongIdlePauseInMs);
+ isolate->IdleNotificationDeadline(
+ g_platform->MonotonicallyIncreasingTime() +
+ kLongIdlePauseInSeconds);
}
if (Shell::options.invoke_weak_callbacks) {
// By sending a low memory notification, we will try hard to collect
@@ -1493,9 +1493,10 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
}
}
if (options.send_idle_notification) {
- const int kLongIdlePauseInMs = 1000;
+ const double kLongIdlePauseInSeconds = 1.0;
isolate->ContextDisposedNotification();
- isolate->IdleNotification(kLongIdlePauseInMs);
+ isolate->IdleNotificationDeadline(
+ g_platform->MonotonicallyIncreasingTime() + kLongIdlePauseInSeconds);
}
if (options.invoke_weak_callbacks) {
// By sending a low memory notification, we will try hard to collect all
@@ -1607,8 +1608,8 @@ int Shell::Main(int argc, char* argv[]) {
#endif // defined(_WIN32) || defined(_WIN64)
if (!SetOptions(argc, argv)) return 1;
v8::V8::InitializeICU(options.icu_data_file);
- v8::Platform* platform = v8::platform::CreateDefaultPlatform();
- v8::V8::InitializePlatform(platform);
+ g_platform = v8::platform::CreateDefaultPlatform();
+ v8::V8::InitializePlatform(g_platform);
v8::V8::Initialize();
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
v8::StartupDataHandler startup_data(argv[0], options.natives_blob,
@@ -1639,6 +1640,13 @@ int Shell::Main(int argc, char* argv[]) {
base::SysInfo::AmountOfPhysicalMemory(),
base::SysInfo::AmountOfVirtualMemory(),
base::SysInfo::NumberOfProcessors());
+
+ Shell::counter_map_ = new CounterMap();
+ if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
+ create_params.counter_lookup_callback = LookupCounter;
+ create_params.create_histogram_callback = CreateHistogram;
+ create_params.add_histogram_sample_callback = AddHistogramSample;
+ }
#endif
Isolate* isolate = Isolate::New(create_params);
DumbLineEditor dumb_line_editor(isolate);
@@ -1704,7 +1712,7 @@ int Shell::Main(int argc, char* argv[]) {
isolate->Dispose();
V8::Dispose();
V8::ShutdownPlatform();
- delete platform;
+ delete g_platform;
return result;
}
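
Both call sites above replace the deprecated millisecond-based IdleNotification with a deadline in seconds on the platform's monotonic clock, which is why d8 now keeps the platform in a file-scope g_platform. The call pattern, extracted into a helper (a sketch against headers of this V8 vintage; it assumes an already-initialized isolate and platform):

    #include <v8.h>
    #include <v8-platform.h>

    // Give V8 a one-second idle window measured on the monotonic clock.
    void NotifyIdleSketch(v8::Isolate* isolate, v8::Platform* platform) {
      const double kLongIdlePauseInSeconds = 1.0;
      isolate->ContextDisposedNotification();
      isolate->IdleNotificationDeadline(
          platform->MonotonicallyIncreasingTime() + kLongIdlePauseInSeconds);
    }
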
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index 40ab1d25d6..eafe798500 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -2,16 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-"use strict";
-
// This file relies on the fact that the following declarations have been made
// in v8natives.js:
// var $isFinite = GlobalIsFinite;
-var $Date = global.Date;
+var $createDate;
// -------------------------------------------------------------------
+(function() {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalDate = global.Date;
+
// This file contains date support implemented in JavaScript.
// Helper function to throw error.
@@ -19,7 +25,6 @@ function ThrowDateTypeError() {
throw new $TypeError('this is not a Date object.');
}
-
var timezone_cache_time = NAN;
var timezone_cache_timezone;
@@ -121,7 +126,7 @@ var Date_cache = {
function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
if (!%_IsConstructCall()) {
// ECMA 262 - 15.9.2
- return (new $Date()).toString();
+ return (new GlobalDate()).toString();
}
// ECMA 262 - 15.9.3
@@ -230,8 +235,8 @@ function LocalTimezoneString(date) {
var timezoneOffset = -TIMEZONE_OFFSET(date);
var sign = (timezoneOffset >= 0) ? 1 : -1;
- var hours = FLOOR((sign * timezoneOffset)/60);
- var min = FLOOR((sign * timezoneOffset)%60);
+ var hours = $floor((sign * timezoneOffset)/60);
+ var min = $floor((sign * timezoneOffset)%60);
var gmt = ' GMT' + ((sign == 1) ? '+' : '-') +
TwoDigitString(hours) + TwoDigitString(min);
return gmt + ' (' + timezone + ')';
@@ -684,7 +689,7 @@ function DateToGMTString() {
function PadInt(n, digits) {
if (digits == 1) return n;
- return n < MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
+ return n < %_MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
}
@@ -731,6 +736,7 @@ var date_cache_version = NAN;
function CheckDateCacheCurrent() {
if (!date_cache_version_holder) {
date_cache_version_holder = %DateCacheVersion();
+ if (!date_cache_version_holder) return;
}
if (date_cache_version_holder[0] == date_cache_version) {
return;
@@ -748,79 +754,78 @@ function CheckDateCacheCurrent() {
function CreateDate(time) {
- var date = new $Date();
+ var date = new GlobalDate();
date.setTime(time);
return date;
}
// -------------------------------------------------------------------
-function SetUpDate() {
- %CheckIsBootstrapping();
-
- %SetCode($Date, DateConstructor);
- %FunctionSetPrototype($Date, new $Date(NAN));
-
- // Set up non-enumerable properties of the Date object itself.
- InstallFunctions($Date, DONT_ENUM, $Array(
- "UTC", DateUTC,
- "parse", DateParse,
- "now", DateNow
- ));
-
- // Set up non-enumerable constructor property of the Date prototype object.
- %AddNamedProperty($Date.prototype, "constructor", $Date, DONT_ENUM);
-
- // Set up non-enumerable functions of the Date prototype object and
- // set their names.
- InstallFunctions($Date.prototype, DONT_ENUM, $Array(
- "toString", DateToString,
- "toDateString", DateToDateString,
- "toTimeString", DateToTimeString,
- "toLocaleString", DateToLocaleString,
- "toLocaleDateString", DateToLocaleDateString,
- "toLocaleTimeString", DateToLocaleTimeString,
- "valueOf", DateValueOf,
- "getTime", DateGetTime,
- "getFullYear", DateGetFullYear,
- "getUTCFullYear", DateGetUTCFullYear,
- "getMonth", DateGetMonth,
- "getUTCMonth", DateGetUTCMonth,
- "getDate", DateGetDate,
- "getUTCDate", DateGetUTCDate,
- "getDay", DateGetDay,
- "getUTCDay", DateGetUTCDay,
- "getHours", DateGetHours,
- "getUTCHours", DateGetUTCHours,
- "getMinutes", DateGetMinutes,
- "getUTCMinutes", DateGetUTCMinutes,
- "getSeconds", DateGetSeconds,
- "getUTCSeconds", DateGetUTCSeconds,
- "getMilliseconds", DateGetMilliseconds,
- "getUTCMilliseconds", DateGetUTCMilliseconds,
- "getTimezoneOffset", DateGetTimezoneOffset,
- "setTime", DateSetTime,
- "setMilliseconds", DateSetMilliseconds,
- "setUTCMilliseconds", DateSetUTCMilliseconds,
- "setSeconds", DateSetSeconds,
- "setUTCSeconds", DateSetUTCSeconds,
- "setMinutes", DateSetMinutes,
- "setUTCMinutes", DateSetUTCMinutes,
- "setHours", DateSetHours,
- "setUTCHours", DateSetUTCHours,
- "setDate", DateSetDate,
- "setUTCDate", DateSetUTCDate,
- "setMonth", DateSetMonth,
- "setUTCMonth", DateSetUTCMonth,
- "setFullYear", DateSetFullYear,
- "setUTCFullYear", DateSetUTCFullYear,
- "toGMTString", DateToGMTString,
- "toUTCString", DateToUTCString,
- "getYear", DateGetYear,
- "setYear", DateSetYear,
- "toISOString", DateToISOString,
- "toJSON", DateToJSON
- ));
-}
-
-SetUpDate();
+%SetCode(GlobalDate, DateConstructor);
+%FunctionSetPrototype(GlobalDate, new GlobalDate(NAN));
+
+// Set up non-enumerable properties of the Date object itself.
+InstallFunctions(GlobalDate, DONT_ENUM, $Array(
+ "UTC", DateUTC,
+ "parse", DateParse,
+ "now", DateNow
+));
+
+// Set up non-enumerable constructor property of the Date prototype object.
+%AddNamedProperty(GlobalDate.prototype, "constructor", GlobalDate, DONT_ENUM);
+
+// Set up non-enumerable functions of the Date prototype object and
+// set their names.
+InstallFunctions(GlobalDate.prototype, DONT_ENUM, $Array(
+ "toString", DateToString,
+ "toDateString", DateToDateString,
+ "toTimeString", DateToTimeString,
+ "toLocaleString", DateToLocaleString,
+ "toLocaleDateString", DateToLocaleDateString,
+ "toLocaleTimeString", DateToLocaleTimeString,
+ "valueOf", DateValueOf,
+ "getTime", DateGetTime,
+ "getFullYear", DateGetFullYear,
+ "getUTCFullYear", DateGetUTCFullYear,
+ "getMonth", DateGetMonth,
+ "getUTCMonth", DateGetUTCMonth,
+ "getDate", DateGetDate,
+ "getUTCDate", DateGetUTCDate,
+ "getDay", DateGetDay,
+ "getUTCDay", DateGetUTCDay,
+ "getHours", DateGetHours,
+ "getUTCHours", DateGetUTCHours,
+ "getMinutes", DateGetMinutes,
+ "getUTCMinutes", DateGetUTCMinutes,
+ "getSeconds", DateGetSeconds,
+ "getUTCSeconds", DateGetUTCSeconds,
+ "getMilliseconds", DateGetMilliseconds,
+ "getUTCMilliseconds", DateGetUTCMilliseconds,
+ "getTimezoneOffset", DateGetTimezoneOffset,
+ "setTime", DateSetTime,
+ "setMilliseconds", DateSetMilliseconds,
+ "setUTCMilliseconds", DateSetUTCMilliseconds,
+ "setSeconds", DateSetSeconds,
+ "setUTCSeconds", DateSetUTCSeconds,
+ "setMinutes", DateSetMinutes,
+ "setUTCMinutes", DateSetUTCMinutes,
+ "setHours", DateSetHours,
+ "setUTCHours", DateSetUTCHours,
+ "setDate", DateSetDate,
+ "setUTCDate", DateSetUTCDate,
+ "setMonth", DateSetMonth,
+ "setUTCMonth", DateSetUTCMonth,
+ "setFullYear", DateSetFullYear,
+ "setUTCFullYear", DateSetUTCFullYear,
+ "toGMTString", DateToGMTString,
+ "toUTCString", DateToUTCString,
+ "getYear", DateGetYear,
+ "setYear", DateSetYear,
+ "toISOString", DateToISOString,
+ "toJSON", DateToJSON
+));
+
+// Expose to the global scope.
+$createDate = CreateDate;
+
+})();
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index c24b478553..76b5fe1410 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -264,7 +264,7 @@ function ScriptBreakPoint(type, script_id_or_name, opt_line, opt_column,
}
-//Creates a clone of script breakpoint that is linked to another script.
+// Creates a clone of script breakpoint that is linked to another script.
ScriptBreakPoint.prototype.cloneForOtherScript = function (other_script) {
var copy = new ScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
other_script.id, this.line_, this.column_, this.groupId_,
@@ -499,10 +499,6 @@ Debug.setListener = function(listener, opt_data) {
};
-Debug.breakExecution = function(f) {
- %Break();
-};
-
Debug.breakLocations = function(f, opt_position_aligment) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
var position_aligment = IS_UNDEFINED(opt_position_aligment)
@@ -552,25 +548,12 @@ Debug.scriptSource = function(func_or_script_name) {
return this.findScript(func_or_script_name).source;
};
+
Debug.source = function(f) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
return %FunctionGetSourceCode(f);
};
-Debug.disassemble = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %DebugDisassembleFunction(f);
-};
-
-Debug.disassembleConstructor = function(f) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %DebugDisassembleConstructor(f);
-};
-
-Debug.ExecuteInDebugContext = function(f, without_debugger) {
- if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
- return %ExecuteInDebugContext(f, !!without_debugger);
-};
Debug.sourcePosition = function(f) {
if (!IS_FUNCTION(f)) throw new Error('Parameters have wrong types.');
@@ -2584,7 +2567,3 @@ function ValueToProtocolValue_(value, mirror_serializer) {
}
return json;
}
-
-Debug.TestApi = {
- CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_
-};
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index a7bf765581..2796d2dc1c 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -20,7 +20,7 @@
#include "src/list.h"
#include "src/log.h"
#include "src/messages.h"
-#include "src/natives.h"
+#include "src/snapshot/natives.h"
#include "include/v8-debug.h"
@@ -60,47 +60,29 @@ static v8::Handle<v8::Context> GetDebugEventContext(Isolate* isolate) {
}
-BreakLocationIterator::BreakLocationIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type) {
- debug_info_ = debug_info;
- type_ = type;
- reloc_iterator_ = NULL;
- reloc_iterator_original_ = NULL;
- Reset(); // Initialize the rest of the member variables.
-}
-
-
-BreakLocationIterator::~BreakLocationIterator() {
- DCHECK(reloc_iterator_ != NULL);
- DCHECK(reloc_iterator_original_ != NULL);
- delete reloc_iterator_;
- delete reloc_iterator_original_;
-}
-
-
-// Check whether a code stub with the specified major key is a possible break
-// point location when looking for source break locations.
-static bool IsSourceBreakStub(Code* code) {
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- return major_key == CodeStub::CallFunction;
-}
-
-
-// Check whether a code stub with the specified major key is a possible break
-// location.
-static bool IsBreakStub(Code* code) {
- CodeStub::Major major_key = CodeStub::GetMajorKey(code);
- return major_key == CodeStub::CallFunction;
+BreakLocation::Iterator::Iterator(Handle<DebugInfo> debug_info,
+ BreakLocatorType type)
+ : debug_info_(debug_info),
+ type_(type),
+ reloc_iterator_(debug_info->code(),
+ ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE)),
+ reloc_iterator_original_(
+ debug_info->original_code(),
+ ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE)),
+ break_index_(-1),
+ position_(1),
+ statement_position_(1) {
+ Next();
}
-void BreakLocationIterator::Next() {
+void BreakLocation::Iterator::Next() {
DisallowHeapAllocation no_gc;
DCHECK(!RinfoDone());
// Iterate through reloc info for code and original code stopping at each
// breakable code target.
- bool first = break_point_ == -1;
+ bool first = break_index_ == -1;
while (!RinfoDone()) {
if (!first) RinfoNext();
first = false;
@@ -115,8 +97,8 @@ void BreakLocationIterator::Next() {
}
// Always update the position as we don't want that to be before the
// statement position.
- position_ = static_cast<int>(
- rinfo()->data() - debug_info_->shared()->start_position());
+ position_ = static_cast<int>(rinfo()->data() -
+ debug_info_->shared()->start_position());
DCHECK(position_ >= 0);
DCHECK(statement_position_ >= 0);
}
@@ -131,7 +113,7 @@ void BreakLocationIterator::Next() {
position_ = 0;
}
statement_position_ = position_;
- break_point_++;
+ break_index_++;
return;
}
@@ -143,7 +125,7 @@ void BreakLocationIterator::Next() {
Code* code = Code::GetCodeFromTargetAddress(target);
if (RelocInfo::IsConstructCall(rmode()) || code->is_call_stub()) {
- break_point_++;
+ break_index_++;
return;
}
@@ -152,144 +134,117 @@ void BreakLocationIterator::Next() {
if ((code->is_inline_cache_stub() && !code->is_binary_op_stub() &&
!code->is_compare_ic_stub() && !code->is_to_boolean_ic_stub())) {
- break_point_++;
+ break_index_++;
return;
}
if (code->kind() == Code::STUB) {
- if (IsDebuggerStatement()) {
- break_point_++;
+ if (RelocInfo::IsDebuggerStatement(rmode())) {
+ break_index_++;
+ return;
+ } else if (CodeStub::GetMajorKey(code) == CodeStub::CallFunction) {
+ break_index_++;
return;
- } else if (type_ == ALL_BREAK_LOCATIONS) {
- if (IsBreakStub(code)) {
- break_point_++;
- return;
- }
- } else {
- DCHECK(type_ == SOURCE_BREAK_LOCATIONS);
- if (IsSourceBreakStub(code)) {
- break_point_++;
- return;
- }
}
}
}
- if (IsDebugBreakSlot() && type_ != CALLS_AND_RETURNS) {
+ if (RelocInfo::IsDebugBreakSlot(rmode()) && type_ != CALLS_AND_RETURNS) {
// There is always a possible break point at a debug break slot.
- break_point_++;
+ break_index_++;
return;
}
}
}
-void BreakLocationIterator::Next(int count) {
- while (count > 0) {
- Next();
- count--;
- }
+// Find the break point at the supplied address, or the closest one before
+// the address.
+BreakLocation BreakLocation::FromAddress(Handle<DebugInfo> debug_info,
+ BreakLocatorType type, Address pc) {
+ Iterator it(debug_info, type);
+ it.SkipTo(BreakIndexFromAddress(debug_info, type, pc));
+ return it.GetBreakLocation();
}
// Find the break point at the supplied address, or the closest one before
// the address.
-void BreakLocationIterator::FindBreakLocationFromAddress(Address pc) {
+void BreakLocation::FromAddressSameStatement(Handle<DebugInfo> debug_info,
+ BreakLocatorType type, Address pc,
+ List<BreakLocation>* result_out) {
+ int break_index = BreakIndexFromAddress(debug_info, type, pc);
+ Iterator it(debug_info, type);
+ it.SkipTo(break_index);
+ int statement_position = it.statement_position();
+ while (!it.Done() && it.statement_position() == statement_position) {
+ result_out->Add(it.GetBreakLocation());
+ it.Next();
+ }
+}
+
+
+int BreakLocation::BreakIndexFromAddress(Handle<DebugInfo> debug_info,
+ BreakLocatorType type, Address pc) {
// Run through all break points to locate the one closest to the address.
- int closest_break_point = 0;
+ int closest_break = 0;
int distance = kMaxInt;
- while (!Done()) {
+ for (Iterator it(debug_info, type); !it.Done(); it.Next()) {
// Check if this break point is closer than what was previously found.
- if (this->pc() <= pc && pc - this->pc() < distance) {
- closest_break_point = break_point();
- distance = static_cast<int>(pc - this->pc());
+ if (it.pc() <= pc && pc - it.pc() < distance) {
+ closest_break = it.break_index();
+ distance = static_cast<int>(pc - it.pc());
// Check whether we can't get any closer.
if (distance == 0) break;
}
- Next();
}
-
- // Move to the break point found.
- Reset();
- Next(closest_break_point);
+ return closest_break;
}
-// Find the break point closest to the supplied source position.
-void BreakLocationIterator::FindBreakLocationFromPosition(int position,
- BreakPositionAlignment alignment) {
+BreakLocation BreakLocation::FromPosition(Handle<DebugInfo> debug_info,
+ BreakLocatorType type, int position,
+ BreakPositionAlignment alignment) {
// Run through all break points to locate the one closest to the source
// position.
- int closest_break_point = 0;
+ int closest_break = 0;
int distance = kMaxInt;
- while (!Done()) {
+ for (Iterator it(debug_info, type); !it.Done(); it.Next()) {
int next_position;
- switch (alignment) {
- case STATEMENT_ALIGNED:
- next_position = this->statement_position();
- break;
- case BREAK_POSITION_ALIGNED:
- next_position = this->position();
- break;
- default:
- UNREACHABLE();
- next_position = this->statement_position();
+ if (alignment == STATEMENT_ALIGNED) {
+ next_position = it.statement_position();
+ } else {
+ DCHECK(alignment == BREAK_POSITION_ALIGNED);
+ next_position = it.position();
}
- // Check if this break point is closer that what was previously found.
if (position <= next_position && next_position - position < distance) {
- closest_break_point = break_point();
+ closest_break = it.break_index();
distance = next_position - position;
// Check whether we can't get any closer.
if (distance == 0) break;
}
- Next();
}
- // Move to the break point found.
- Reset();
- Next(closest_break_point);
-}
-
-
-void BreakLocationIterator::Reset() {
- // Create relocation iterators for the two code objects.
- if (reloc_iterator_ != NULL) delete reloc_iterator_;
- if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
- reloc_iterator_ = new RelocIterator(
- debug_info_->code(),
- ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
- reloc_iterator_original_ = new RelocIterator(
- debug_info_->original_code(),
- ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
-
- // Position at the first break point.
- break_point_ = -1;
- position_ = 1;
- statement_position_ = 1;
- Next();
-}
-
-
-bool BreakLocationIterator::Done() const {
- return RinfoDone();
+ Iterator it(debug_info, type);
+ it.SkipTo(closest_break);
+ return it.GetBreakLocation();
}
-void BreakLocationIterator::SetBreakPoint(Handle<Object> break_point_object) {
+void BreakLocation::SetBreakPoint(Handle<Object> break_point_object) {
// If there is not already a real break point here patch code with debug
// break.
if (!HasBreakPoint()) SetDebugBreak();
DCHECK(IsDebugBreak() || IsDebuggerStatement());
// Set the break point information.
- DebugInfo::SetBreakPoint(debug_info_, code_position(),
- position(), statement_position(),
- break_point_object);
+ DebugInfo::SetBreakPoint(debug_info_, pc_offset_, position_,
+ statement_position_, break_point_object);
}
-void BreakLocationIterator::ClearBreakPoint(Handle<Object> break_point_object) {
+void BreakLocation::ClearBreakPoint(Handle<Object> break_point_object) {
// Clear the break point information.
- DebugInfo::ClearBreakPoint(debug_info_, code_position(), break_point_object);
+ DebugInfo::ClearBreakPoint(debug_info_, pc_offset_, break_point_object);
// If there are no more break points here remove the debug break.
if (!HasBreakPoint()) {
ClearDebugBreak();
@@ -298,7 +253,7 @@ void BreakLocationIterator::ClearBreakPoint(Handle<Object> break_point_object) {
}
-void BreakLocationIterator::SetOneShot() {
+void BreakLocation::SetOneShot() {
// Debugger statement always calls debugger. No need to modify it.
if (IsDebuggerStatement()) return;
@@ -313,7 +268,7 @@ void BreakLocationIterator::SetOneShot() {
}
-void BreakLocationIterator::ClearOneShot() {
+void BreakLocation::ClearOneShot() {
// Debugger statement always calls debugger. No need to modify it.
if (IsDebuggerStatement()) return;
@@ -329,7 +284,7 @@ void BreakLocationIterator::ClearOneShot() {
}
-void BreakLocationIterator::SetDebugBreak() {
+void BreakLocation::SetDebugBreak() {
// Debugger statement always calls debugger. No need to modify it.
if (IsDebuggerStatement()) return;
@@ -339,7 +294,7 @@ void BreakLocationIterator::SetDebugBreak() {
// handler as the handler and the function is the same.
if (IsDebugBreak()) return;
- if (RelocInfo::IsJSReturn(rmode())) {
+ if (IsExit()) {
// Patch the frame exit code with a break point.
SetDebugBreakAtReturn();
} else if (IsDebugBreakSlot()) {
@@ -353,93 +308,52 @@ void BreakLocationIterator::SetDebugBreak() {
}
-void BreakLocationIterator::ClearDebugBreak() {
+void BreakLocation::ClearDebugBreak() {
// Debugger statement always calls debugger. No need to modify it.
if (IsDebuggerStatement()) return;
- if (RelocInfo::IsJSReturn(rmode())) {
- // Restore the frame exit code.
- ClearDebugBreakAtReturn();
+ if (IsExit()) {
+ // Restore the original frame exit code.
+ RestoreFromOriginal(Assembler::kJSReturnSequenceLength);
} else if (IsDebugBreakSlot()) {
// Restore the code in the break slot.
- ClearDebugBreakAtSlot();
+ RestoreFromOriginal(Assembler::kDebugBreakSlotLength);
} else {
- // Patch the IC call.
- ClearDebugBreakAtIC();
+ // Restore the IC call.
+ rinfo().set_target_address(original_rinfo().target_address());
+ // Some ICs store data in the feedback vector. Clear this to ensure we
+ // won't miss future stepping requirements.
+ SharedFunctionInfo* shared = debug_info_->shared();
+ shared->feedback_vector()->ClearICSlots(shared);
}
DCHECK(!IsDebugBreak());
}
-bool BreakLocationIterator::IsStepInLocation(Isolate* isolate) {
- if (RelocInfo::IsConstructCall(original_rmode())) {
- return true;
- } else if (RelocInfo::IsCodeTarget(rmode())) {
+void BreakLocation::RestoreFromOriginal(int length_in_bytes) {
+ memcpy(pc(), original_pc(), length_in_bytes);
+ CpuFeatures::FlushICache(pc(), length_in_bytes);
+}
+
+
+bool BreakLocation::IsStepInLocation() const {
+ if (IsConstructCall()) return true;
+ if (RelocInfo::IsCodeTarget(rmode())) {
HandleScope scope(debug_info_->GetIsolate());
- Address target = original_rinfo()->target_address();
- Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
- if (target_code->kind() == Code::STUB) {
- return CodeStub::GetMajorKey(*target_code) == CodeStub::CallFunction;
- }
+ Handle<Code> target_code = CodeTarget();
return target_code->is_call_stub();
}
return false;
}
-void BreakLocationIterator::PrepareStepIn(Isolate* isolate) {
-#ifdef DEBUG
- HandleScope scope(isolate);
- // Step in can only be prepared if currently positioned on an IC call,
- // construct call or CallFunction stub call.
- Address target = rinfo()->target_address();
- Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
- // All the following stuff is needed only for assertion checks so the code
- // is wrapped in ifdef.
- Handle<Code> maybe_call_function_stub = target_code;
- if (IsDebugBreak()) {
- Address original_target = original_rinfo()->target_address();
- maybe_call_function_stub =
- Handle<Code>(Code::GetCodeFromTargetAddress(original_target));
- }
- bool is_call_function_stub =
- (maybe_call_function_stub->kind() == Code::STUB &&
- CodeStub::GetMajorKey(*maybe_call_function_stub) ==
- CodeStub::CallFunction);
-
- // Step in through construct call requires no changes to the running code.
- // Step in through getters/setters should already be prepared as well
- // because caller of this function (Debug::PrepareStep) is expected to
- // flood the top frame's function with one shot breakpoints.
- // Step in through CallFunction stub should also be prepared by caller of
- // this function (Debug::PrepareStep) which should flood target function
- // with breakpoints.
- DCHECK(RelocInfo::IsConstructCall(rmode()) ||
- target_code->is_inline_cache_stub() ||
- is_call_function_stub);
-#endif
-}
-
-
-// Check whether the break point is at a position which will exit the function.
-bool BreakLocationIterator::IsExit() const {
- return (RelocInfo::IsJSReturn(rmode()));
-}
-
-
-bool BreakLocationIterator::HasBreakPoint() {
- return debug_info_->HasBreakPoint(code_position());
-}
-
-
-// Check whether there is a debug break at the current position.
-bool BreakLocationIterator::IsDebugBreak() {
- if (RelocInfo::IsJSReturn(rmode())) {
- return IsDebugBreakAtReturn();
+bool BreakLocation::IsDebugBreak() const {
+ if (IsExit()) {
+ return rinfo().IsPatchedReturnSequence();
} else if (IsDebugBreakSlot()) {
- return IsDebugBreakAtSlot();
+ return rinfo().IsPatchedDebugBreakSlotSequence();
} else {
- return Debug::IsDebugBreak(rinfo()->target_address());
+ return Debug::IsDebugBreak(rinfo().target_address());
}
}
@@ -491,70 +405,53 @@ static Handle<Code> DebugBreakForIC(Handle<Code> code, RelocInfo::Mode mode) {
}
-void BreakLocationIterator::SetDebugBreakAtIC() {
+void BreakLocation::SetDebugBreakAtIC() {
// Patch the original code with the current address as the current address
// might have changed by the inline caching since the code was copied.
- original_rinfo()->set_target_address(rinfo()->target_address());
+ original_rinfo().set_target_address(rinfo().target_address());
- RelocInfo::Mode mode = rmode();
- if (RelocInfo::IsCodeTarget(mode)) {
- Address target = rinfo()->target_address();
- Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
+ if (RelocInfo::IsCodeTarget(rmode_)) {
+ Handle<Code> target_code = CodeTarget();
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
- Handle<Code> dbgbrk_code = DebugBreakForIC(target_code, mode);
- rinfo()->set_target_address(dbgbrk_code->entry());
+ Handle<Code> debug_break_code = DebugBreakForIC(target_code, rmode_);
+ rinfo().set_target_address(debug_break_code->entry());
}
}
-void BreakLocationIterator::ClearDebugBreakAtIC() {
- // Patch the code to the original invoke.
- rinfo()->set_target_address(original_rinfo()->target_address());
-}
-
-
-bool BreakLocationIterator::IsDebuggerStatement() {
- return RelocInfo::DEBUG_BREAK == rmode();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakSlot() {
- return RelocInfo::DEBUG_BREAK_SLOT == rmode();
+Handle<Object> BreakLocation::BreakPointObjects() const {
+ return debug_info_->GetBreakPointObjects(pc_offset_);
}
-Object* BreakLocationIterator::BreakPointObjects() {
- return debug_info_->GetBreakPointObjects(code_position());
+Handle<Code> BreakLocation::CodeTarget() const {
+ DCHECK(IsCodeTarget());
+ Address target = rinfo().target_address();
+ return Handle<Code>(Code::GetCodeFromTargetAddress(target));
}
-// Clear out all the debug break code. This is ONLY supposed to be used when
-// shutting down the debugger as it will leave the break point information in
-// DebugInfo even though the code is patched back to the non break point state.
-void BreakLocationIterator::ClearAllDebugBreak() {
- while (!Done()) {
- ClearDebugBreak();
- Next();
- }
+Handle<Code> BreakLocation::OriginalCodeTarget() const {
+ DCHECK(IsCodeTarget());
+ Address target = original_rinfo().target_address();
+ return Handle<Code>(Code::GetCodeFromTargetAddress(target));
}
-bool BreakLocationIterator::RinfoDone() const {
- DCHECK(reloc_iterator_->done() == reloc_iterator_original_->done());
- return reloc_iterator_->done();
+bool BreakLocation::Iterator::RinfoDone() const {
+ DCHECK(reloc_iterator_.done() == reloc_iterator_original_.done());
+ return reloc_iterator_.done();
}
-void BreakLocationIterator::RinfoNext() {
- reloc_iterator_->next();
- reloc_iterator_original_->next();
+void BreakLocation::Iterator::RinfoNext() {
+ reloc_iterator_.next();
+ reloc_iterator_original_.next();
#ifdef DEBUG
- DCHECK(reloc_iterator_->done() == reloc_iterator_original_->done());
- if (!reloc_iterator_->done()) {
- DCHECK(rmode() == original_rmode());
- }
+ DCHECK(reloc_iterator_.done() == reloc_iterator_original_.done());
+ DCHECK(reloc_iterator_.done() || rmode() == original_rmode());
#endif
}
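
The iterator advances the live-code and original-code relocation iterators strictly in lockstep and asserts that they run out together. The same paired-iterator invariant in a generic sketch over two equal-length sequences (illustrative only; V8's invariant is on relocation modes, not element values):

    #include <cassert>
    #include <vector>

    template <typename It>
    struct PairedIterator {
      It a, a_end, b, b_end;
      bool Done() const {
        assert((a == a_end) == (b == b_end));  // Must run out together.
        return a == a_end;
      }
      void Next() {
        ++a;
        ++b;
        assert((a == a_end) == (b == b_end));
      }
    };

    int main() {
      std::vector<int> live = {1, 2, 3};
      std::vector<int> original = {1, 2, 3};
      PairedIterator<std::vector<int>::const_iterator> it{
          live.begin(), live.end(), original.begin(), original.end()};
      while (!it.Done()) it.Next();
    }
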
@@ -696,9 +593,10 @@ void ScriptCache::HandleWeakScript(
void Debug::HandlePhantomDebugInfo(
- const v8::PhantomCallbackData<DebugInfoListNode>& data) {
- Debug* debug = reinterpret_cast<Isolate*>(data.GetIsolate())->debug();
+ const v8::WeakCallbackInfo<DebugInfoListNode>& data) {
DebugInfoListNode* node = data.GetParameter();
+ node->ClearInfo();
+ Debug* debug = reinterpret_cast<Isolate*>(data.GetIsolate())->debug();
debug->RemoveDebugInfo(node);
#ifdef DEBUG
for (DebugInfoListNode* n = debug->debug_info_list_;
@@ -713,16 +611,20 @@ void Debug::HandlePhantomDebugInfo(
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
// Globalize the request debug info object and make it weak.
GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
- debug_info_ = Handle<DebugInfo>::cast(global_handles->Create(debug_info));
- typedef PhantomCallbackData<void>::Callback Callback;
- GlobalHandles::MakePhantom(
- reinterpret_cast<Object**>(debug_info_.location()), this, 0,
- reinterpret_cast<Callback>(Debug::HandlePhantomDebugInfo));
+ debug_info_ =
+ Handle<DebugInfo>::cast(global_handles->Create(debug_info)).location();
+ typedef WeakCallbackInfo<void>::Callback Callback;
+ GlobalHandles::MakeWeak(
+ reinterpret_cast<Object**>(debug_info_), this,
+ reinterpret_cast<Callback>(Debug::HandlePhantomDebugInfo),
+ v8::WeakCallbackType::kParameter);
}
-DebugInfoListNode::~DebugInfoListNode() {
- GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_.location()));
+void DebugInfoListNode::ClearInfo() {
+ if (debug_info_ == nullptr) return;
+ GlobalHandles::Destroy(reinterpret_cast<Object**>(debug_info_));
+ debug_info_ = nullptr;
}
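
DebugInfoListNode now releases its global handle through an explicit ClearInfo, called from the weak callback, instead of relying on a destructor that could race the callback. The embedder-facing shape of the same weak-handle-with-parameter pattern, sketched with the public API (assuming a V8 of roughly this vintage; Node and OnWeak are illustrative names):

    #include <v8.h>

    struct Node {
      v8::Persistent<v8::Object> handle;
      // Mirrors ClearInfo above: release the handle exactly once.
      void Clear() { handle.Reset(); }
    };

    static void OnWeak(const v8::WeakCallbackInfo<Node>& data) {
      data.GetParameter()->Clear();  // Then unlink the node from its list.
    }

    void MakeNodeWeak(v8::Isolate* isolate, Node* node,
                      v8::Local<v8::Object> object) {
      node->handle.Reset(isolate, object);
      node->handle.SetWeak(node, OnWeak, v8::WeakCallbackType::kParameter);
    }
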
@@ -744,8 +646,8 @@ bool Debug::CompileDebuggerScript(Isolate* isolate, int index) {
// Compile the script.
Handle<SharedFunctionInfo> function_info;
function_info = Compiler::CompileScript(
- source_code, script_name, 0, 0, false, false, context, NULL, NULL,
- ScriptCompiler::kNoCompileOptions, NATIVES_CODE, false);
+ source_code, script_name, 0, 0, false, false, Handle<Object>(), context,
+ NULL, NULL, ScriptCompiler::kNoCompileOptions, NATIVES_CODE, false);
// Silently ignore stack overflows during compilation.
if (function_info.is_null()) {
@@ -888,14 +790,14 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
// Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
- // pc points to the instruction after the current one, possibly a break
+ // PC points to the instruction after the current one, possibly a break
// location as well. So the "- 1" to exclude it from the search.
- break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
+ Address call_pc = frame->pc() - 1;
+ BreakLocation break_location =
+ BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
// Check whether step next reached a new statement.
- if (!StepNextContinue(&break_location_iterator, frame)) {
+ if (!StepNextContinue(&break_location, frame)) {
// Decrease steps left if performing multiple steps.
if (thread_local_.step_count_ > 0) {
thread_local_.step_count_--;
@@ -905,9 +807,8 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
// If there is one or more real break points check whether any of these are
// triggered.
Handle<Object> break_points_hit(heap->undefined_value(), isolate_);
- if (break_location_iterator.HasBreakPoint()) {
- Handle<Object> break_point_objects =
- Handle<Object>(break_location_iterator.BreakPointObjects(), isolate_);
+ if (break_location.HasBreakPoint()) {
+ Handle<Object> break_point_objects = break_location.BreakPointObjects();
break_points_hit = CheckBreakPoints(break_point_objects);
}
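
BreakLocation::FromAddress is handed frame->pc() - 1 and maps it to the break location closest at or before that address; BreakIndexFromAddress does this with a linear scan over all break locations. The scan, sketched over a plain vector of code offsets:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Pick the break whose pc is closest at or before |pc|, as in
    // BreakLocation::BreakIndexFromAddress above.
    size_t ClosestBreakIndex(const std::vector<size_t>& break_pcs, size_t pc) {
      size_t closest = 0;
      size_t distance = static_cast<size_t>(-1);
      for (size_t i = 0; i < break_pcs.size(); ++i) {
        if (break_pcs[i] <= pc && pc - break_pcs[i] < distance) {
          closest = i;
          distance = pc - break_pcs[i];
          if (distance == 0) break;  // Cannot get any closer.
        }
      }
      return closest;
    }

    int main() {
      std::vector<size_t> breaks = {0, 10, 24, 40};
      assert(ClosestBreakIndex(breaks, 25) == 2);  // 24 is at or before 25.
      assert(ClosestBreakIndex(breaks, 10) == 1);
    }
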
@@ -1092,11 +993,10 @@ bool Debug::SetBreakPoint(Handle<JSFunction> function,
DCHECK(*source_position >= 0);
// Find the break point and change it.
- BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(*source_position, STATEMENT_ALIGNED);
- it.SetBreakPoint(break_point_object);
-
- *source_position = it.statement_position();
+ BreakLocation location = BreakLocation::FromPosition(
+ debug_info, SOURCE_BREAK_LOCATIONS, *source_position, STATEMENT_ALIGNED);
+ *source_position = location.statement_position();
+ location.SetBreakPoint(break_point_object);
// At least one active break point now.
return debug_info->GetBreakPointCount() > 0;
@@ -1112,11 +1012,12 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
PrepareForBreakPoints();
// Obtain shared function info for the function.
- Object* result = FindSharedFunctionInfoInScript(script, *source_position);
+ Handle<Object> result =
+ FindSharedFunctionInfoInScript(script, *source_position);
if (result->IsUndefined()) return false;
// Make sure the function has set up the debug info.
- Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
+ Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>::cast(result);
if (!EnsureDebugInfo(shared, Handle<JSFunction>::null())) {
// Return if retrieving debug info failed.
return false;
@@ -1136,12 +1037,12 @@ bool Debug::SetBreakPointForScript(Handle<Script> script,
DCHECK(position >= 0);
// Find the break point and change it.
- BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromPosition(position, alignment);
- it.SetBreakPoint(break_point_object);
+ BreakLocation location = BreakLocation::FromPosition(
+ debug_info, SOURCE_BREAK_LOCATIONS, position, alignment);
+ location.SetBreakPoint(break_point_object);
- position = (alignment == STATEMENT_ALIGNED) ? it.statement_position()
- : it.position();
+ position = (alignment == STATEMENT_ALIGNED) ? location.statement_position()
+ : location.position();
*source_position = position + shared->start_position();
@@ -1156,18 +1057,21 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
DebugInfoListNode* node = debug_info_list_;
while (node != NULL) {
- Object* result = DebugInfo::FindBreakPointInfo(node->debug_info(),
- break_point_object);
+ Handle<Object> result =
+ DebugInfo::FindBreakPointInfo(node->debug_info(), break_point_object);
if (!result->IsUndefined()) {
// Get information in the break point.
- BreakPointInfo* break_point_info = BreakPointInfo::cast(result);
+ Handle<BreakPointInfo> break_point_info =
+ Handle<BreakPointInfo>::cast(result);
Handle<DebugInfo> debug_info = node->debug_info();
// Find the break point and clear it.
- BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
- it.FindBreakLocationFromAddress(debug_info->code()->entry() +
- break_point_info->code_position()->value());
- it.ClearBreakPoint(break_point_object);
+ Address pc = debug_info->code()->entry() +
+ break_point_info->code_position()->value();
+
+ BreakLocation location =
+ BreakLocation::FromAddress(debug_info, SOURCE_BREAK_LOCATIONS, pc);
+ location.ClearBreakPoint(break_point_object);
// If there are no more break points left remove the debug info for this
// function.
@@ -1182,15 +1086,17 @@ void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
}
+// Clear out all the debug break code. This is ONLY supposed to be used when
+// shutting down the debugger as it will leave the break point information in
+// DebugInfo even though the code is patched back to the non break point state.
void Debug::ClearAllBreakPoints() {
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
- // Remove all debug break code.
- BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- it.ClearAllDebugBreak();
- node = node->next();
+ for (DebugInfoListNode* node = debug_info_list_; node != NULL;
+ node = node->next()) {
+ for (BreakLocation::Iterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
+ !it.Done(); it.Next()) {
+ it.GetBreakLocation().ClearDebugBreak();
+ }
}
-
// Remove all debug info.
while (debug_info_list_ != NULL) {
RemoveDebugInfoAndClearFromShared(debug_info_list_->debug_info());
@@ -1213,10 +1119,9 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function,
}
// Flood the function with break points.
- BreakLocationIterator it(GetDebugInfo(shared), type);
- while (!it.Done()) {
- it.SetOneShot();
- it.Next();
+ for (BreakLocation::Iterator it(GetDebugInfo(shared), type); !it.Done();
+ it.Next()) {
+ it.GetBreakLocation().SetOneShot();
}
}
@@ -1284,8 +1189,9 @@ void Debug::FloodHandlerWithOneShot() {
}
for (JavaScriptFrameIterator it(isolate_, id); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
- if (frame->HasHandler()) {
- // Flood the function with the catch block with break points
+ int stack_slots = 0; // The computed stack slot count is not used.
+ if (frame->LookupExceptionHandlerInTable(&stack_slots) > 0) {
+ // Flood the function with the catch/finally block with break points.
FloodWithOneShot(Handle<JSFunction>(frame->function()));
return;
}
@@ -1362,8 +1268,12 @@ void Debug::PrepareStep(StepAction step_action,
return;
}
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frames_it.frame()->Summarize(&frames);
+ FrameSummary summary = frames.first();
+
// Get the debug info (create it if it does not exist).
- Handle<JSFunction> function(frame->function());
+ Handle<JSFunction> function(summary.function());
Handle<SharedFunctionInfo> shared(function->shared());
if (!EnsureDebugInfo(shared, function)) {
// Return if ensuring debug info failed.
@@ -1371,47 +1281,38 @@ void Debug::PrepareStep(StepAction step_action,
}
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
- // Find the break location where execution has stopped.
- BreakLocationIterator it(debug_info, ALL_BREAK_LOCATIONS);
- // pc points to the instruction after the current one, possibly a break
- // location as well. So the "- 1" to exclude it from the search.
- it.FindBreakLocationFromAddress(frame->pc() - 1);
-
// Compute whether or not the target is a call target.
bool is_load_or_store = false;
bool is_inline_cache_stub = false;
bool is_at_restarted_function = false;
Handle<Code> call_function_stub;
+ // PC points to the instruction after the current one, possibly a break
+ // location as well. So the "- 1" to exclude it from the search.
+ Address call_pc = summary.pc() - 1;
+ BreakLocation location =
+ BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
+
if (thread_local_.restarter_frame_function_pointer_ == NULL) {
- if (RelocInfo::IsCodeTarget(it.rinfo()->rmode())) {
- bool is_call_target = false;
- Address target = it.rinfo()->target_address();
- Code* code = Code::GetCodeFromTargetAddress(target);
- if (code->is_call_stub()) {
- is_call_target = true;
- }
- if (code->is_inline_cache_stub()) {
- is_inline_cache_stub = true;
- is_load_or_store = !is_call_target;
- }
+ if (location.IsCodeTarget()) {
+ Handle<Code> target_code = location.CodeTarget();
+ is_inline_cache_stub = target_code->is_inline_cache_stub();
+ is_load_or_store = is_inline_cache_stub && !target_code->is_call_stub();
// Check if target code is CallFunction stub.
- Code* maybe_call_function_stub = code;
+ Handle<Code> maybe_call_function_stub = target_code;
// If there is a breakpoint at this line look at the original code to
// check if it is a CallFunction stub.
- if (it.IsDebugBreak()) {
- Address original_target = it.original_rinfo()->target_address();
- maybe_call_function_stub =
- Code::GetCodeFromTargetAddress(original_target);
+ if (location.IsDebugBreak()) {
+ maybe_call_function_stub = location.OriginalCodeTarget();
}
if ((maybe_call_function_stub->kind() == Code::STUB &&
- CodeStub::GetMajorKey(maybe_call_function_stub) ==
+ CodeStub::GetMajorKey(*maybe_call_function_stub) ==
CodeStub::CallFunction) ||
maybe_call_function_stub->is_call_stub()) {
// Save reference to the code as we may need it to find out arguments
// count for 'step in' later.
- call_function_stub = Handle<Code>(maybe_call_function_stub);
+ call_function_stub = maybe_call_function_stub;
}
}
} else {
@@ -1419,14 +1320,14 @@ void Debug::PrepareStep(StepAction step_action,
}
// If this is the last break code target step out is the only possibility.
- if (it.IsExit() || step_action == StepOut) {
+ if (location.IsExit() || step_action == StepOut) {
if (step_action == StepOut) {
// Skip step_count frames starting with the current one.
while (step_count-- > 0 && !frames_it.done()) {
frames_it.Advance();
}
} else {
- DCHECK(it.IsExit());
+ DCHECK(location.IsExit());
frames_it.Advance();
}
// Skip builtin functions on the stack.
@@ -1443,9 +1344,9 @@ void Debug::PrepareStep(StepAction step_action,
// Set target frame pointer.
ActivateStepOut(frames_it.frame());
}
- } else if (!(is_inline_cache_stub || RelocInfo::IsConstructCall(it.rmode()) ||
- !call_function_stub.is_null() || is_at_restarted_function)
- || step_action == StepNext || step_action == StepMin) {
+ } else if (!(is_inline_cache_stub || location.IsConstructCall() ||
+ !call_function_stub.is_null() || is_at_restarted_function) ||
+ step_action == StepNext || step_action == StepMin) {
// Step next or step min.
// Fill the current function with one-shot break points.
@@ -1455,7 +1356,7 @@ void Debug::PrepareStep(StepAction step_action,
// Remember source position and frame to handle step next.
thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(frame->pc());
+ debug_info->code()->SourceStatementPosition(summary.pc());
thread_local_.last_fp_ = frame->UnpaddedFP();
} else {
// If there's restarter frame on top of the stack, just get the pointer
@@ -1527,12 +1428,20 @@ void Debug::PrepareStep(StepAction step_action,
// Object::Get/SetPropertyWithAccessor, otherwise the step action will be
// propagated on the next Debug::Break.
thread_local_.last_statement_position_ =
- debug_info->code()->SourceStatementPosition(frame->pc());
+ debug_info->code()->SourceStatementPosition(summary.pc());
thread_local_.last_fp_ = frame->UnpaddedFP();
}
// Step in or Step in min
- it.PrepareStepIn(isolate_);
+ // Stepping in through a construct call requires no changes to the running
+ // code. Stepping in through getters/setters should already be prepared as
+ // well, because the caller of this function (Debug::PrepareStep) is
+ // expected to flood the top frame's function with one-shot breakpoints.
+ // Stepping in through the CallFunction stub should likewise be prepared by
+ // the caller (Debug::PrepareStep), which should flood the target function
+ // with breakpoints.
+ DCHECK(location.IsConstructCall() || is_inline_cache_stub ||
+ !call_function_stub.is_null() || is_at_restarted_function);
ActivateStepIn(frame);
}
}
@@ -1544,7 +1453,7 @@ void Debug::PrepareStep(StepAction step_action,
// there will be several break points in the same statement when the code is
// flooded with one-shot break points. This function helps to perform several
// steps before reporting break back to the debugger.
-bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
+bool Debug::StepNextContinue(BreakLocation* break_location,
JavaScriptFrame* frame) {
// StepNext and StepOut shouldn't bring us deeper in code, so last frame
// shouldn't be a parent of current frame.
@@ -1563,11 +1472,11 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator,
// statement is hit.
if (step_action == StepNext || step_action == StepIn) {
// Never continue if returning from function.
- if (break_location_iterator->IsExit()) return false;
+ if (break_location->IsExit()) return false;
// Continue if we are still on the same frame and in the same statement.
int current_statement_position =
- break_location_iterator->code()->SourceStatementPosition(frame->pc());
+ break_location->code()->SourceStatementPosition(frame->pc());
return thread_local_.last_fp_ == frame->UnpaddedFP() &&
thread_local_.last_statement_position_ == current_statement_position;
}
@@ -1585,9 +1494,6 @@ bool Debug::IsDebugBreak(Address addr) {
}
-
-
-
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared,
@@ -1674,15 +1580,12 @@ void Debug::ClearOneShot() {
// The current implementation just runs through all the breakpoints. When the
// last break point for a function is removed that function is automatically
// removed from the list.
-
- DebugInfoListNode* node = debug_info_list_;
- while (node != NULL) {
- BreakLocationIterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
- while (!it.Done()) {
- it.ClearOneShot();
- it.Next();
+ for (DebugInfoListNode* node = debug_info_list_; node != NULL;
+ node = node->next()) {
+ for (BreakLocation::Iterator it(node->debug_info(), ALL_BREAK_LOCATIONS);
+ !it.Done(); it.Next()) {
+ it.GetBreakLocation().ClearOneShot();
}
- node = node->next();
}
}
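The rewritten ClearOneShot walks the debug-info list and, per node, a BreakLocation::Iterator that materializes BreakLocation values on demand (see the new Iterator class in debug.h below). A standalone sketch of that iterator-yields-value-objects shape, with hypothetical names:

    #include <cstdio>

    struct Node {
      int info;
      Node* next;
    };

    class InfoIterator {
     public:
      explicit InfoIterator(Node* head) : cur_(head) {}
      bool Done() const { return cur_ == nullptr; }
      void Next() { cur_ = cur_->next; }
      // Hands out a value, not a reference to iterator internals.
      int Get() const { return cur_->info; }
     private:
      Node* cur_;
    };

    int main() {
      Node c{3, nullptr}, b{2, &c}, a{1, &b};
      for (InfoIterator it(&a); !it.Done(); it.Next()) {
        std::printf("%d\n", it.Get());
      }
    }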
@@ -2098,8 +2001,8 @@ void Debug::PrepareForBreakPoints() {
}
-Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
- int position) {
+Handle<Object> Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
+ int position) {
// Iterate the heap looking for SharedFunctionInfo generated from the
// script. The innermost SharedFunctionInfo containing the source position
// for the requested break point is found.
@@ -2182,7 +2085,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
} // End for loop.
} // End no-allocation scope.
- if (target.is_null()) return heap->undefined_value();
+ if (target.is_null()) return isolate_->factory()->undefined_value();
// There will be at least one break point when we are done.
has_break_points_ = true;
@@ -2198,7 +2101,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
MaybeHandle<Code> maybe_result = target_function.is_null()
? Compiler::GetUnoptimizedCode(target)
: Compiler::GetUnoptimizedCode(target_function);
- if (maybe_result.is_null()) return isolate_->heap()->undefined_value();
+ if (maybe_result.is_null()) return isolate_->factory()->undefined_value();
}
} // End while loop.
@@ -2217,7 +2120,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
}
}
- return *target;
+ return target;
}
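FindSharedFunctionInfoInScript now returns Handle<Object> rather than a raw Object*: the function can allocate (it may compile unoptimized code), and a moving GC invalidates any raw pointer held across an allocation, while a handle survives because it is one extra indirection through a slot the GC rewrites. A minimal model of that indirection, not V8's actual Handle:

    // The collector updates *slot_ when an object moves; callers always
    // dereference through the slot, so the handle stays valid.
    template <typename T>
    class Handle {
     public:
      explicit Handle(T** slot) : slot_(slot) {}
      T* operator->() const { return *slot_; }
      T& operator*() const { return **slot_; }
     private:
      T** slot_;  // owned by a handle scope, rewritten on GC
    };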
@@ -2241,6 +2144,10 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
return false;
}
+ // Make sure IC state is clean.
+ shared->code()->ClearInlineCaches();
+ shared->feedback_vector()->ClearICSlots(*shared);
+
// Create the debug info object.
Handle<DebugInfo> debug_info = isolate->factory()->NewDebugInfo(shared);
@@ -2579,7 +2486,7 @@ MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
}
-void Debug::OnThrow(Handle<Object> exception, bool uncaught) {
+void Debug::OnThrow(Handle<Object> exception) {
if (in_debug_scope() || ignore_events()) return;
// Temporarily clear any scheduled_exception to allow evaluating
// JavaScript from the debug event handler.
@@ -2589,7 +2496,7 @@ void Debug::OnThrow(Handle<Object> exception, bool uncaught) {
scheduled_exception = handle(isolate_->scheduled_exception(), isolate_);
isolate_->clear_scheduled_exception();
}
- OnException(exception, uncaught, isolate_->GetPromiseOnStackOnThrow());
+ OnException(exception, isolate_->GetPromiseOnStackOnThrow());
if (!scheduled_exception.is_null()) {
isolate_->thread_local_top()->scheduled_exception_ = *scheduled_exception;
}
@@ -2602,7 +2509,7 @@ void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) {
// Check whether the promise has been marked as having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
if (JSObject::GetDataProperty(promise, key)->IsUndefined()) {
- OnException(value, false, promise);
+ OnException(value, promise);
}
}
@@ -2617,9 +2524,10 @@ MaybeHandle<Object> Debug::PromiseHasUserDefinedRejectHandler(
}
-void Debug::OnException(Handle<Object> exception, bool uncaught,
- Handle<Object> promise) {
- if (!uncaught && promise->IsJSObject()) {
+void Debug::OnException(Handle<Object> exception, Handle<Object> promise) {
+ Isolate::CatchType catch_type = isolate_->PredictExceptionCatcher();
+ bool uncaught = (catch_type == Isolate::NOT_CAUGHT);
+ if (promise->IsJSObject()) {
Handle<JSObject> jspromise = Handle<JSObject>::cast(promise);
// Mark the promise as already having triggered a message.
Handle<Symbol> key = isolate_->factory()->promise_debug_marker_symbol();
@@ -3181,8 +3089,19 @@ void Debug::HandleDebugBreak() {
bool debug_command_only = isolate_->stack_guard()->CheckDebugCommand() &&
!isolate_->stack_guard()->CheckDebugBreak();
+ bool is_debugger_statement = !isolate_->stack_guard()->CheckDebugCommand() &&
+ !isolate_->stack_guard()->CheckDebugBreak();
+
isolate_->stack_guard()->ClearDebugBreak();
+ if (is_debugger_statement) {
+ // If we have been called via the 'debugger' JavaScript statement,
+ // we might not be prepared for breakpoints.
+ // TODO(dslomov,yangguo): CheckDebugBreak may race with RequestDebugBreak.
+ // Revisit this to clean up.
+ HandleScope handle_scope(isolate_);
+ PrepareForBreakPoints();
+ }
ProcessDebugMessages(debug_command_only);
}
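HandleDebugBreak now derives three entry reasons from two stack-guard bits: a debug command only, a debug break, or neither, which means execution reached a 'debugger' statement directly. The boolean derivation, restated as a standalone sketch:

    struct GuardBits {
      bool debug_command;
      bool debug_break;
    };

    // Only the command interrupt is pending: process messages, don't break.
    bool DebugCommandOnly(GuardBits g) {
      return g.debug_command && !g.debug_break;
    }

    // Neither interrupt is pending: we were entered directly by a
    // 'debugger' statement, so breakpoint state may be unprepared.
    bool IsDebuggerStatement(GuardBits g) {
      return !g.debug_command && !g.debug_break;
    }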
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 0ec9024436..3a9f82388c 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -66,84 +66,161 @@ enum BreakPositionAlignment {
};
-// Class for iterating through the break points in a function and changing
-// them.
-class BreakLocationIterator {
+class BreakLocation {
public:
- explicit BreakLocationIterator(Handle<DebugInfo> debug_info,
- BreakLocatorType type);
- virtual ~BreakLocationIterator();
-
- void Next();
- void Next(int count);
- void FindBreakLocationFromAddress(Address pc);
- void FindBreakLocationFromPosition(int position,
- BreakPositionAlignment alignment);
- void Reset();
- bool Done() const;
+ // Find the break point at the supplied address, or the closest one before
+ // the address.
+ static BreakLocation FromAddress(Handle<DebugInfo> debug_info,
+ BreakLocatorType type, Address pc);
+
+ static void FromAddressSameStatement(Handle<DebugInfo> debug_info,
+ BreakLocatorType type, Address pc,
+ List<BreakLocation>* result_out);
+
+ static BreakLocation FromPosition(Handle<DebugInfo> debug_info,
+ BreakLocatorType type, int position,
+ BreakPositionAlignment alignment);
+
+ bool IsDebugBreak() const;
+ inline bool IsExit() const { return RelocInfo::IsJSReturn(rmode_); }
+ inline bool IsConstructCall() const {
+ return RelocInfo::IsConstructCall(rmode_);
+ }
+ inline bool IsCodeTarget() const { return RelocInfo::IsCodeTarget(rmode_); }
+
+ Handle<Code> CodeTarget() const;
+ Handle<Code> OriginalCodeTarget() const;
+
+ bool IsStepInLocation() const;
+ inline bool HasBreakPoint() const {
+ return debug_info_->HasBreakPoint(pc_offset_);
+ }
+
+ Handle<Object> BreakPointObjects() const;
+
void SetBreakPoint(Handle<Object> break_point_object);
void ClearBreakPoint(Handle<Object> break_point_object);
+
void SetOneShot();
void ClearOneShot();
- bool IsStepInLocation(Isolate* isolate);
- void PrepareStepIn(Isolate* isolate);
- bool IsExit() const;
- bool HasBreakPoint();
- bool IsDebugBreak();
- Object* BreakPointObjects();
- void ClearAllDebugBreak();
-
- inline int code_position() {
- return static_cast<int>(pc() - debug_info_->code()->entry());
- }
- inline int break_point() { return break_point_; }
- inline int position() { return position_; }
- inline int statement_position() { return statement_position_; }
- inline Address pc() { return reloc_iterator_->rinfo()->pc(); }
- inline Code* code() { return debug_info_->code(); }
- inline RelocInfo* rinfo() { return reloc_iterator_->rinfo(); }
- inline RelocInfo::Mode rmode() const {
- return reloc_iterator_->rinfo()->rmode();
+ inline RelocInfo rinfo() const {
+ return RelocInfo(pc(), rmode(), data_, code());
}
- inline RelocInfo* original_rinfo() {
- return reloc_iterator_original_->rinfo();
- }
- inline RelocInfo::Mode original_rmode() const {
- return reloc_iterator_original_->rinfo()->rmode();
+
+ inline RelocInfo original_rinfo() const {
+ return RelocInfo(original_pc(), original_rmode(), original_data_,
+ original_code());
}
- bool IsDebuggerStatement();
+ inline int position() const { return position_; }
+ inline int statement_position() const { return statement_position_; }
+
+ inline Address pc() const { return code()->entry() + pc_offset_; }
+ inline Address original_pc() const {
+ return original_code()->entry() + original_pc_offset_;
+ }
- protected:
- bool RinfoDone() const;
- void RinfoNext();
+ inline RelocInfo::Mode rmode() const { return rmode_; }
+ inline RelocInfo::Mode original_rmode() const { return original_rmode_; }
- BreakLocatorType type_;
- int break_point_;
- int position_;
- int statement_position_;
- Handle<DebugInfo> debug_info_;
- RelocIterator* reloc_iterator_;
- RelocIterator* reloc_iterator_original_;
+ inline Code* code() const { return debug_info_->code(); }
+ inline Code* original_code() const { return debug_info_->original_code(); }
private:
- void SetDebugBreak();
- void ClearDebugBreak();
+ BreakLocation(Handle<DebugInfo> debug_info, RelocInfo* rinfo,
+ RelocInfo* original_rinfo, int position, int statement_position)
+ : debug_info_(debug_info),
+ pc_offset_(static_cast<int>(rinfo->pc() - debug_info->code()->entry())),
+ original_pc_offset_(static_cast<int>(
+ original_rinfo->pc() - debug_info->original_code()->entry())),
+ rmode_(rinfo->rmode()),
+ original_rmode_(original_rinfo->rmode()),
+ data_(rinfo->data()),
+ original_data_(original_rinfo->data()),
+ position_(position),
+ statement_position_(statement_position) {}
+
+ class Iterator {
+ public:
+ Iterator(Handle<DebugInfo> debug_info, BreakLocatorType type);
- void SetDebugBreakAtIC();
- void ClearDebugBreakAtIC();
+ BreakLocation GetBreakLocation() {
+ return BreakLocation(debug_info_, rinfo(), original_rinfo(), position(),
+ statement_position());
+ }
- bool IsDebugBreakAtReturn();
- void SetDebugBreakAtReturn();
- void ClearDebugBreakAtReturn();
+ inline bool Done() const { return RinfoDone(); }
+ void Next();
+
+ void SkipTo(int count) {
+ while (count-- > 0) Next();
+ }
+
+ inline RelocInfo::Mode rmode() { return reloc_iterator_.rinfo()->rmode(); }
+ inline RelocInfo::Mode original_rmode() {
+ return reloc_iterator_original_.rinfo()->rmode();
+ }
+
+ inline RelocInfo* rinfo() { return reloc_iterator_.rinfo(); }
+ inline RelocInfo* original_rinfo() {
+ return reloc_iterator_original_.rinfo();
+ }
- bool IsDebugBreakSlot();
- bool IsDebugBreakAtSlot();
+ inline Address pc() { return rinfo()->pc(); }
+ inline Address original_pc() { return original_rinfo()->pc(); }
+
+ int break_index() const { return break_index_; }
+
+ inline int position() const { return position_; }
+ inline int statement_position() const { return statement_position_; }
+
+ private:
+ bool RinfoDone() const;
+ void RinfoNext();
+
+ Handle<DebugInfo> debug_info_;
+ BreakLocatorType type_;
+ RelocIterator reloc_iterator_;
+ RelocIterator reloc_iterator_original_;
+ int break_index_;
+ int position_;
+ int statement_position_;
+
+ DisallowHeapAllocation no_gc_;
+
+ DISALLOW_COPY_AND_ASSIGN(Iterator);
+ };
+
+ friend class Debug;
+
+ static int BreakIndexFromAddress(Handle<DebugInfo> debug_info,
+ BreakLocatorType type, Address pc);
+
+ void ClearDebugBreak();
+ void RestoreFromOriginal(int length_in_bytes);
+
+ void SetDebugBreak();
+ void SetDebugBreakAtReturn();
void SetDebugBreakAtSlot();
- void ClearDebugBreakAtSlot();
+ void SetDebugBreakAtIC();
+
+ inline bool IsDebuggerStatement() const {
+ return RelocInfo::IsDebuggerStatement(rmode_);
+ }
+ inline bool IsDebugBreakSlot() const {
+ return RelocInfo::IsDebugBreakSlot(rmode_);
+ }
- DISALLOW_COPY_AND_ASSIGN(BreakLocationIterator);
+ Handle<DebugInfo> debug_info_;
+ int pc_offset_;
+ int original_pc_offset_;
+ RelocInfo::Mode rmode_;
+ RelocInfo::Mode original_rmode_;
+ intptr_t data_;
+ intptr_t original_data_;
+ int position_;
+ int statement_position_;
};
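Note that BreakLocation stores pc_offset_ relative to the code object's entry and recomputes absolute addresses in pc(), instead of caching raw addresses the way the old iterator did; a cached address would go stale if the GC moved the code object. A reduced model of the offset-based design, with hypothetical names:

    #include <cstdint>

    struct CodeObject {
      uint8_t* entry;  // may change when the collector moves the object
    };

    class Location {
     public:
      Location(CodeObject* code, int pc_offset)
          : code_(code), pc_offset_(pc_offset) {}
      // Derived from the current entry on every call, so the address is
      // correct even after the code object has moved.
      uint8_t* pc() const { return code_->entry + pc_offset_; }
     private:
      CodeObject* code_;
      int pc_offset_;
    };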
@@ -184,15 +261,17 @@ class ScriptCache : private HashMap {
class DebugInfoListNode {
public:
explicit DebugInfoListNode(DebugInfo* debug_info);
- virtual ~DebugInfoListNode();
+ virtual ~DebugInfoListNode() { ClearInfo(); }
DebugInfoListNode* next() { return next_; }
void set_next(DebugInfoListNode* next) { next_ = next; }
- Handle<DebugInfo> debug_info() { return debug_info_; }
+ Handle<DebugInfo> debug_info() { return Handle<DebugInfo>(debug_info_); }
+
+ void ClearInfo();
private:
// Global (weak) handle to the debug info object.
- Handle<DebugInfo> debug_info_;
+ DebugInfo** debug_info_;
// Next pointer for linked list.
DebugInfoListNode* next_;
@@ -348,7 +427,7 @@ class Debug {
// Debug event triggers.
void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
- void OnThrow(Handle<Object> exception, bool uncaught);
+ void OnThrow(Handle<Object> exception);
void OnPromiseReject(Handle<JSObject> promise, Handle<Object> value);
void OnCompileError(Handle<Script> script);
void OnBeforeCompile(Handle<Script> script);
@@ -404,8 +483,7 @@ class Debug {
void ClearStepping();
void ClearStepOut();
bool IsStepping() { return thread_local_.step_count_ > 0; }
- bool StepNextContinue(BreakLocationIterator* break_location_iterator,
- JavaScriptFrame* frame);
+ bool StepNextContinue(BreakLocation* location, JavaScriptFrame* frame);
bool StepInActive() { return thread_local_.step_into_fp_ != 0; }
void HandleStepIn(Handle<Object> function_obj, Handle<Object> holder,
Address fp, bool is_constructor);
@@ -423,13 +501,11 @@ class Debug {
static bool HasDebugInfo(Handle<SharedFunctionInfo> shared);
// This function is used in FunctionNameUsing* tests.
- Object* FindSharedFunctionInfoInScript(Handle<Script> script, int position);
+ Handle<Object> FindSharedFunctionInfoInScript(Handle<Script> script,
+ int position);
// Returns true if the current stub call is patched to call the debugger.
static bool IsDebugBreak(Address addr);
- // Returns true if the current return statement has been patched to be
- // a debugger breakpoint.
- static bool IsDebugBreakAtReturn(RelocInfo* rinfo);
static Handle<Object> GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared,
@@ -521,8 +597,7 @@ class Debug {
return break_disabled_ || in_debug_event_listener_;
}
- void OnException(Handle<Object> exception, bool uncaught,
- Handle<Object> promise);
+ void OnException(Handle<Object> exception, Handle<Object> promise);
// Constructors for debug event objects.
MUST_USE_RESULT MaybeHandle<Object> MakeJSObject(
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 665279a0ad..a8de06ee58 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/codegen.h"
+#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/disasm.h"
#include "src/full-codegen.h"
@@ -609,8 +610,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate,
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
- PROFILE(isolate_, CodeDeoptEvent(compiled_code_, bailout_id_, from_,
- fp_to_sp_delta_));
+ PROFILE(isolate_, CodeDeoptEvent(compiled_code_, from_, fp_to_sp_delta_));
}
unsigned size = ComputeInputFrameSize();
input_ = new(size) FrameDescription(size, function);
@@ -769,7 +769,7 @@ void Deoptimizer::DoComputeOutputFrames() {
fp_to_sp_delta_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
(compiled_code_->is_hydrogen_stub())) {
- compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_);
+ compiled_code_->PrintDeoptLocation(trace_scope_->file(), from_);
}
}
@@ -3648,34 +3648,22 @@ const char* Deoptimizer::GetDeoptReason(DeoptReason deopt_reason) {
}
-Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, int bailout_id) {
- int last_position = 0;
- Isolate* isolate = code->GetIsolate();
+Deoptimizer::DeoptInfo Deoptimizer::GetDeoptInfo(Code* code, Address pc) {
+ SourcePosition last_position = SourcePosition::Unknown();
Deoptimizer::DeoptReason last_reason = Deoptimizer::kNoReason;
int mask = RelocInfo::ModeMask(RelocInfo::DEOPT_REASON) |
- RelocInfo::ModeMask(RelocInfo::POSITION) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+ RelocInfo::ModeMask(RelocInfo::POSITION);
for (RelocIterator it(code, mask); !it.done(); it.next()) {
RelocInfo* info = it.rinfo();
+ if (info->pc() >= pc) return DeoptInfo(last_position, NULL, last_reason);
if (info->rmode() == RelocInfo::POSITION) {
- last_position = static_cast<int>(info->data());
+ int raw_position = static_cast<int>(info->data());
+ last_position = raw_position ? SourcePosition::FromRaw(raw_position)
+ : SourcePosition::Unknown();
} else if (info->rmode() == RelocInfo::DEOPT_REASON) {
last_reason = static_cast<Deoptimizer::DeoptReason>(info->data());
- } else if (last_reason != Deoptimizer::kNoReason) {
- if ((bailout_id ==
- Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
- Deoptimizer::EAGER)) ||
- (bailout_id ==
- Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
- Deoptimizer::SOFT)) ||
- (bailout_id ==
- Deoptimizer::GetDeoptimizationId(isolate, info->target_address(),
- Deoptimizer::LAZY))) {
- CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
- return DeoptInfo(last_position, NULL, last_reason);
- }
}
}
- return DeoptInfo(0, NULL, Deoptimizer::kNoReason);
+ return DeoptInfo(SourcePosition::Unknown(), NULL, Deoptimizer::kNoReason);
}
} } // namespace v8::internal
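GetDeoptInfo no longer matches a bailout id against runtime entries; it scans the relocation info in address order and keeps the last source position and deopt reason recorded before the faulting pc. The same scan over a plain sorted record list, as a sketch:

    #include <cstdint>
    #include <vector>

    enum class Kind { kPosition, kDeoptReason };

    struct Record {
      uintptr_t pc;  // records are sorted by pc
      Kind kind;
      int data;
    };

    struct Info {
      int position = -1;  // -1 means unknown
      int reason = 0;     // 0 means no reason recorded
    };

    // Remember the most recent position/reason seen strictly before pc.
    Info FindInfo(const std::vector<Record>& records, uintptr_t pc) {
      Info info;
      for (const Record& r : records) {
        if (r.pc >= pc) break;
        if (r.kind == Kind::kPosition) {
          info.position = r.data;
        } else {
          info.reason = r.data;
        }
      }
      return info;
    }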
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 471a05d9b0..fd65e83dd2 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -132,7 +132,6 @@ class OptimizedFunctionVisitor BASE_EMBEDDED {
V(kNotAHeapNumberUndefined, "not a heap number/undefined") \
V(kNotAJavaScriptObject, "not a JavaScript object") \
V(kNotASmi, "not a Smi") \
- V(kNotHeapNumber, "not heap number") \
V(kNull, "null") \
V(kOutOfBounds, "out of bounds") \
V(kOutsideOfRange, "Outside of range") \
@@ -184,15 +183,16 @@ class Deoptimizer : public Malloced {
static const char* GetDeoptReason(DeoptReason deopt_reason);
struct DeoptInfo {
- DeoptInfo(int r, const char* m, DeoptReason d)
- : raw_position(r), mnemonic(m), deopt_reason(d) {}
+ DeoptInfo(SourcePosition position, const char* m, DeoptReason d)
+ : position(position), mnemonic(m), deopt_reason(d), inlining_id(0) {}
- int raw_position;
+ SourcePosition position;
const char* mnemonic;
DeoptReason deopt_reason;
+ int inlining_id;
};
- static DeoptInfo GetDeoptInfo(Code* code, int bailout_id);
+ static DeoptInfo GetDeoptInfo(Code* code, byte* from);
struct JumpTableEntry : public ZoneObject {
inline JumpTableEntry(Address entry, const DeoptInfo& deopt_info,
@@ -322,11 +322,10 @@ class Deoptimizer : public Malloced {
static const int kNotDeoptimizationEntry = -1;
// Generators for the deoptimization entry code.
- class EntryGenerator BASE_EMBEDDED {
+ class TableEntryGenerator BASE_EMBEDDED {
public:
- EntryGenerator(MacroAssembler* masm, BailoutType type)
- : masm_(masm), type_(type) { }
- virtual ~EntryGenerator() { }
+ TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
+ : masm_(masm), type_(type), count_(count) {}
void Generate();
@@ -335,24 +334,13 @@ class Deoptimizer : public Malloced {
BailoutType type() const { return type_; }
Isolate* isolate() const { return masm_->isolate(); }
- virtual void GeneratePrologue() { }
-
- private:
- MacroAssembler* masm_;
- Deoptimizer::BailoutType type_;
- };
-
- class TableEntryGenerator : public EntryGenerator {
- public:
- TableEntryGenerator(MacroAssembler* masm, BailoutType type, int count)
- : EntryGenerator(masm, type), count_(count) { }
-
- protected:
- virtual void GeneratePrologue();
+ void GeneratePrologue();
private:
int count() const { return count_; }
+ MacroAssembler* masm_;
+ Deoptimizer::BailoutType type_;
int count_;
};
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index e0316441af..fbdda54646 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -11,7 +11,7 @@
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
+#include "src/snapshot/serialize.h"
#include "src/string-stream.h"
namespace v8 {
@@ -85,14 +85,11 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
} else {
// No relocation information when printing code stubs.
}
-#if !V8_TARGET_ARCH_PPC
int constants = -1; // no constants being decoded at the start
-#endif
while (pc < end) {
// First decode instruction so that we know its length.
byte* prev_pc = pc;
-#if !V8_TARGET_ARCH_PPC
if (constants > 0) {
SNPrintF(decode_buffer,
"%08x constant",
@@ -121,25 +118,6 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
pc += d.InstructionDecode(decode_buffer, pc);
}
}
-#else // !V8_TARGET_ARCH_PPC
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
- // Function descriptors are specially decoded and skipped.
- // Other internal references (load of ool constant pool pointer)
- // are not since they are a encoded as a regular mov sequence.
- int skip;
- if (it != NULL && !it->done() && it->rinfo()->pc() == pc &&
- it->rinfo()->rmode() == RelocInfo::INTERNAL_REFERENCE &&
- (skip = Assembler::DecodeInternalReference(decode_buffer, pc))) {
- pc += skip;
- } else {
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, pc);
- }
-#else
- decode_buffer[0] = '\0';
- pc += d.InstructionDecode(decode_buffer, pc);
-#endif // ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
-#endif // !V8_TARGET_ARCH_PPC
// Collect RelocInfo for this instruction (prev_pc .. pc-1)
List<const char*> comments(4);
@@ -207,8 +185,8 @@ static int DecodeIt(Isolate* isolate, std::ostream* os,
SmartArrayPointer<const char> obj_name = accumulator.ToCString();
out.AddFormatted(" ;; object: %s", obj_name.get());
} else if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- const char* reference_name =
- ref_encoder.NameOfAddress(relocinfo.target_reference());
+ const char* reference_name = ref_encoder.NameOfAddress(
+ isolate, relocinfo.target_external_reference());
out.AddFormatted(" ;; external reference (%s)", reference_name);
} else if (RelocInfo::IsCodeTarget(rmode)) {
out.AddFormatted(" ;; code:");
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index cd7ba984af..c523818d87 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -991,10 +991,10 @@ class FastElementsAccessor
(IsFastDoubleElementsKind(KindTraits::Kind) ==
((map == isolate->heap()->fixed_array_map() && length == 0) ||
map == isolate->heap()->fixed_double_array_map())));
+ if (length == 0) return; // nothing to do!
DisallowHeapAllocation no_gc;
+ Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
for (int i = 0; i < length; i++) {
- HandleScope scope(isolate);
- Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
DCHECK((!IsFastSmiElementsKind(KindTraits::Kind) ||
BackingStore::get(backing_store, i)->IsSmi()) ||
(IsFastHoleyElementsKind(KindTraits::Kind) ==
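The elements.cc hunk hoists the backing-store handle out of the loop and drops the per-iteration HandleScope; that is safe only because the surrounding DisallowHeapAllocation scope guarantees no GC can run (and move the store) inside the loop. In debug builds such a guard amounts to an RAII assertion scope; a reduced model:

    #include <cassert>

    // Counts live "no allocation" scopes; the allocator asserts the
    // counter is zero before allocating.
    struct NoAllocScope {
      static int depth;
      NoAllocScope() { ++depth; }
      ~NoAllocScope() { --depth; }
    };
    int NoAllocScope::depth = 0;

    void Allocate() {
      assert(NoAllocScope::depth == 0 && "allocation forbidden here");
      // ... allocate ...
    }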
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 9dfef372bc..7ae67410e4 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -210,11 +210,11 @@ MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
DCHECK(catcher.HasCaught());
DCHECK(isolate->has_pending_exception());
DCHECK(isolate->external_caught_exception());
- if (exception_out != NULL) {
- if (isolate->pending_exception() ==
- isolate->heap()->termination_exception()) {
- is_termination = true;
- } else {
+ if (isolate->pending_exception() ==
+ isolate->heap()->termination_exception()) {
+ is_termination = true;
+ } else {
+ if (exception_out != NULL) {
*exception_out = v8::Utils::OpenHandle(*catcher.Exception());
}
}
@@ -222,9 +222,11 @@ MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
}
DCHECK(!isolate->has_pending_exception());
- DCHECK(!isolate->external_caught_exception());
}
- if (is_termination) isolate->TerminateExecution();
+
+ // Re-request the terminate-execution interrupt so that it triggers later.
+ if (is_termination) isolate->stack_guard()->RequestTerminateExecution();
+
return maybe_result;
}
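Instead of terminating synchronously inside TryCall, the code now re-arms the stack guard's terminate-execution interrupt so termination fires at the next interrupt check in the caller. The request/handle flag pattern, sketched standalone:

    #include <atomic>

    class StackGuard {
     public:
      // Called by TryCall above when a termination exception was swallowed.
      void RequestTerminateExecution() { terminate_.store(true); }

      // Called at interrupt checks (function entry, loop back edges);
      // returns true exactly once per request.
      bool HandleTerminate() { return terminate_.exchange(false); }

     private:
      std::atomic<bool> terminate_{false};
    };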
@@ -649,6 +651,13 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv,
}
+void StackGuard::CheckAndHandleGCInterrupt() {
+ if (CheckAndClearInterrupt(GC_REQUEST)) {
+ isolate_->heap()->HandleGCRequest();
+ }
+}
+
+
Object* StackGuard::HandleInterrupts() {
if (CheckAndClearInterrupt(GC_REQUEST)) {
isolate_->heap()->HandleGCRequest();
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 47cbb08f03..870bb91184 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -197,6 +197,10 @@ class StackGuard FINAL {
// stack overflow, then handle the interruption accordingly.
Object* HandleInterrupts();
+ bool InterruptRequested() { return GetCurrentStackPosition() < climit(); }
+
+ void CheckAndHandleGCInterrupt();
+
private:
StackGuard();
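InterruptRequested() piggybacks on the stack-overflow check: V8 adjusts climit() when interrupts are requested (not shown in this hunk), so a single "current stack position below the limit" comparison covers both stack overflow and pending interrupts. A reduced model, assuming a downward-growing stack:

    #include <cstdint>

    uintptr_t GetCurrentStackPosition() {
      char dummy;  // the address of a local approximates the stack pointer
      return reinterpret_cast<uintptr_t>(&dummy);
    }

    class Guard {
     public:
      explicit Guard(uintptr_t real_limit) : climit_(real_limit) {}
      // Requesting an interrupt raises the limit so the next check fires
      // immediately, even though the real stack is nowhere near full.
      void RequestInterrupt() { climit_ = UINTPTR_MAX; }
      bool InterruptRequested() const {
        return GetCurrentStackPosition() < climit_;
      }
     private:
      uintptr_t climit_;
    };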
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 2c1f91d399..58f808bbc9 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -123,11 +123,6 @@ void StatisticsExtension::GetCounters(
{heap->cell_space()->Size(), "cell_space_live_bytes"},
{heap->cell_space()->Available(), "cell_space_available_bytes"},
{heap->cell_space()->CommittedMemory(), "cell_space_commited_bytes"},
- {heap->property_cell_space()->Size(), "property_cell_space_live_bytes"},
- {heap->property_cell_space()->Available(),
- "property_cell_space_available_bytes"},
- {heap->property_cell_space()->CommittedMemory(),
- "property_cell_space_commited_bytes"},
{heap->lo_space()->Size(), "lo_space_live_bytes"},
{heap->lo_space()->Available(), "lo_space_available_bytes"},
{heap->lo_space()->CommittedMemory(), "lo_space_commited_bytes"},
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 95590adc96..1cdce60874 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -538,10 +538,30 @@ MaybeHandle<String> Factory::NewConsString(Handle<String> left,
NewRawTwoByteString(length).ToHandleChecked(), left, right);
}
- Handle<Map> map = (is_one_byte || is_one_byte_data_in_two_byte_string)
- ? cons_one_byte_string_map()
- : cons_string_map();
- Handle<ConsString> result = New<ConsString>(map, NEW_SPACE);
+ return (is_one_byte || is_one_byte_data_in_two_byte_string)
+ ? NewOneByteConsString(length, left, right)
+ : NewTwoByteConsString(length, left, right);
+}
+
+
+MaybeHandle<String> Factory::NewOneByteConsString(int length,
+ Handle<String> left,
+ Handle<String> right) {
+ return NewRawConsString(cons_one_byte_string_map(), length, left, right);
+}
+
+
+MaybeHandle<String> Factory::NewTwoByteConsString(int length,
+ Handle<String> left,
+ Handle<String> right) {
+ return NewRawConsString(cons_string_map(), length, left, right);
+}
+
+
+MaybeHandle<String> Factory::NewRawConsString(Handle<Map> map, int length,
+ Handle<String> left,
+ Handle<String> right) {
+ Handle<ConsString> result = New<ConsString>(map, NEW_SPACE);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
@@ -802,7 +822,6 @@ Handle<CodeCache> Factory::NewCodeCache() {
Handle<CodeCache>::cast(NewStruct(CODE_CACHE_TYPE));
code_cache->set_default_cache(*empty_fixed_array(), SKIP_WRITE_BARRIER);
code_cache->set_normal_type_cache(*undefined_value(), SKIP_WRITE_BARRIER);
- code_cache->set_weak_cell_cache(*undefined_value(), SKIP_WRITE_BARRIER);
return code_cache;
}
@@ -905,7 +924,7 @@ Handle<Cell> Factory::NewCell(Handle<Object> value) {
}
-Handle<PropertyCell> Factory::NewPropertyCellWithHole() {
+Handle<PropertyCell> Factory::NewPropertyCell() {
CALL_HEAP_FUNCTION(
isolate(),
isolate()->heap()->AllocatePropertyCell(),
@@ -913,14 +932,6 @@ Handle<PropertyCell> Factory::NewPropertyCellWithHole() {
}
-Handle<PropertyCell> Factory::NewPropertyCell(Handle<Object> value) {
- AllowDeferredHandleDereference convert_to_cell;
- Handle<PropertyCell> cell = NewPropertyCellWithHole();
- PropertyCell::SetValueInferType(cell, value);
- return cell;
-}
-
-
Handle<WeakCell> Factory::NewWeakCell(Handle<HeapObject> value) {
AllowDeferredHandleDereference convert_to_cell;
CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateWeakCell(*value),
@@ -1053,58 +1064,58 @@ Handle<HeapNumber> Factory::NewHeapNumber(double value,
}
-MaybeHandle<Object> Factory::NewTypeError(const char* message,
- Vector<Handle<Object> > args) {
+Handle<Object> Factory::NewTypeError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeTypeError", message, args);
}
-MaybeHandle<Object> Factory::NewTypeError(Handle<String> message) {
+Handle<Object> Factory::NewTypeError(Handle<String> message) {
return NewError("$TypeError", message);
}
-MaybeHandle<Object> Factory::NewRangeError(const char* message,
- Vector<Handle<Object> > args) {
+Handle<Object> Factory::NewRangeError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeRangeError", message, args);
}
-MaybeHandle<Object> Factory::NewRangeError(Handle<String> message) {
+Handle<Object> Factory::NewRangeError(Handle<String> message) {
return NewError("$RangeError", message);
}
-MaybeHandle<Object> Factory::NewSyntaxError(const char* message,
- Handle<JSArray> args) {
+Handle<Object> Factory::NewSyntaxError(const char* message,
+ Handle<JSArray> args) {
return NewError("MakeSyntaxError", message, args);
}
-MaybeHandle<Object> Factory::NewSyntaxError(Handle<String> message) {
+Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
return NewError("$SyntaxError", message);
}
-MaybeHandle<Object> Factory::NewReferenceError(const char* message,
- Vector<Handle<Object> > args) {
+Handle<Object> Factory::NewReferenceError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeReferenceError", message, args);
}
-MaybeHandle<Object> Factory::NewReferenceError(const char* message,
- Handle<JSArray> args) {
+Handle<Object> Factory::NewReferenceError(const char* message,
+ Handle<JSArray> args) {
return NewError("MakeReferenceError", message, args);
}
-MaybeHandle<Object> Factory::NewReferenceError(Handle<String> message) {
+Handle<Object> Factory::NewReferenceError(Handle<String> message) {
return NewError("$ReferenceError", message);
}
-MaybeHandle<Object> Factory::NewError(const char* maker, const char* message,
- Vector<Handle<Object> > args) {
+Handle<Object> Factory::NewError(const char* maker, const char* message,
+ Vector<Handle<Object> > args) {
// Instantiate a closeable HandleScope for EscapeFrom.
v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
Handle<FixedArray> array = NewFixedArray(args.length());
@@ -1112,21 +1123,19 @@ MaybeHandle<Object> Factory::NewError(const char* maker, const char* message,
array->set(i, *args[i]);
}
Handle<JSArray> object = NewJSArrayWithElements(array);
- Handle<Object> result;
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
- NewError(maker, message, object), Object);
+ Handle<Object> result = NewError(maker, message, object);
return result.EscapeFrom(&scope);
}
-MaybeHandle<Object> Factory::NewEvalError(const char* message,
- Vector<Handle<Object> > args) {
+Handle<Object> Factory::NewEvalError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeEvalError", message, args);
}
-MaybeHandle<Object> Factory::NewError(const char* message,
- Vector<Handle<Object> > args) {
+Handle<Object> Factory::NewError(const char* message,
+ Vector<Handle<Object> > args) {
return NewError("MakeError", message, args);
}
@@ -1167,8 +1176,8 @@ Handle<String> Factory::EmergencyNewError(const char* message,
}
-MaybeHandle<Object> Factory::NewError(const char* maker, const char* message,
- Handle<JSArray> args) {
+Handle<Object> Factory::NewError(const char* maker, const char* message,
+ Handle<JSArray> args) {
Handle<String> make_str = InternalizeUtf8String(maker);
Handle<Object> fun_obj = Object::GetProperty(
isolate()->js_builtins_object(), make_str).ToHandleChecked();
@@ -1190,19 +1199,21 @@ MaybeHandle<Object> Factory::NewError(const char* maker, const char* message,
arraysize(argv),
argv,
&exception).ToHandle(&result)) {
- return exception;
+ Handle<Object> exception_obj;
+ if (exception.ToHandle(&exception_obj)) return exception_obj;
+ return undefined_value();
}
return result;
}
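With the switch from MaybeHandle<Object> to Handle<Object> return types, a failed builtin call is unwrapped with ToHandle and falls back to undefined rather than propagating an empty handle. The unwrap-or-default shape, modeled here with std::optional:

    #include <optional>
    #include <string>

    // Mirrors: if (exception.ToHandle(&exception_obj)) return exception_obj;
    //          return undefined_value();
    std::string UnwrapOr(const std::optional<std::string>& maybe,
                         const std::string& fallback) {
      return maybe.has_value() ? *maybe : fallback;
    }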
-MaybeHandle<Object> Factory::NewError(Handle<String> message) {
+Handle<Object> Factory::NewError(Handle<String> message) {
return NewError("$Error", message);
}
-MaybeHandle<Object> Factory::NewError(const char* constructor,
- Handle<String> message) {
+Handle<Object> Factory::NewError(const char* constructor,
+ Handle<String> message) {
Handle<String> constr = InternalizeUtf8String(constructor);
Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
isolate()->js_builtins_object(), constr).ToHandleChecked());
@@ -1217,7 +1228,9 @@ MaybeHandle<Object> Factory::NewError(const char* constructor,
arraysize(argv),
argv,
&exception).ToHandle(&result)) {
- return exception;
+ Handle<Object> exception_obj;
+ if (exception.ToHandle(&exception_obj)) return exception_obj;
+ return undefined_value();
}
return result;
}
@@ -1353,10 +1366,8 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
}
-static bool ShouldOptimizeNewClosure(Isolate* isolate,
- Handle<SharedFunctionInfo> info) {
- return isolate->use_crankshaft() && !info->is_toplevel() &&
- info->is_compiled() && info->allows_lazy_compilation();
+static bool ShouldOptimizeNewClosure(Handle<SharedFunctionInfo> info) {
+ return !info->is_toplevel() && info->allows_lazy_compilation();
}
@@ -1378,13 +1389,6 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
if (!info->bound() && index < 0) {
int number_of_literals = info->num_literals();
Handle<FixedArray> literals = NewFixedArray(number_of_literals, pretenure);
- if (number_of_literals > 0) {
- // Store the native context in the literals array prefix. This
- // context will be used when creating object, regexp and array
- // literals in this function.
- literals->set(JSFunction::kLiteralNativeContextIndex,
- context->native_context());
- }
result->set_literals(*literals);
}
@@ -1398,7 +1402,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
return result;
}
- if (FLAG_always_opt && ShouldOptimizeNewClosure(isolate(), info)) {
+ if (FLAG_always_opt && ShouldOptimizeNewClosure(info)) {
result->MarkForOptimization();
}
return result;
@@ -1573,10 +1577,11 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
PropertyDetails details = descs->GetDetails(i);
// Only accessors are expected.
DCHECK_EQ(ACCESSOR_CONSTANT, details.type());
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+ PropertyCellType::kMutable);
Handle<Name> name(descs->GetKey(i));
- Handle<Object> value(descs->GetCallbacksObject(i), isolate());
- Handle<PropertyCell> cell = NewPropertyCell(value);
+ Handle<PropertyCell> cell = NewPropertyCell();
+ cell->set_value(descs->GetCallbacksObject(i));
// |dictionary| already contains enough space for all properties.
USE(NameDictionary::Add(dictionary, name, cell, d));
}
@@ -1791,8 +1796,14 @@ void SetupArrayBufferView(i::Isolate* isolate,
obj->set_buffer(*buffer);
- obj->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*obj);
+ Heap* heap = isolate->heap();
+ if (heap->InNewSpace(*obj)) {
+ obj->set_weak_next(heap->new_array_buffer_views_list());
+ heap->set_new_array_buffer_views_list(*obj);
+ } else {
+ obj->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*obj);
+ }
i::Handle<i::Object> byte_offset_object =
isolate->factory()->NewNumberFromSize(byte_offset);
@@ -1949,7 +1960,7 @@ void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
InitializeFunction(js_function, shared.ToHandleChecked(), context);
} else {
// Provide JSObjects with a constructor.
- map->set_constructor(context->object_function());
+ map->SetConstructor(context->object_function());
}
}
@@ -2009,9 +2020,14 @@ void Factory::BecomeJSFunction(Handle<JSProxy> proxy) {
}
-Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
- const FeedbackVectorSpec& spec) {
- return TypeFeedbackVector::Allocate(isolate(), spec);
+template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
+ const ZoneFeedbackVectorSpec* spec);
+template Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
+ const FeedbackVectorSpec* spec);
+
+template <typename Spec>
+Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(const Spec* spec) {
+ return TypeFeedbackVector::Allocate<Spec>(isolate(), spec);
}
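NewTypeFeedbackVector becomes a member template defined in the .cc file; the two "template Handle<TypeFeedbackVector> ..." lines above it are explicit instantiations, which emit code for exactly those two Spec types so other translation units can link against them without seeing the definition. The same pattern in standalone form:

    // In the header: declared but not defined.
    struct Factory {
      template <typename Spec>
      int Make(const Spec* spec);
    };

    // In the .cc file: the definition...
    template <typename Spec>
    int Factory::Make(const Spec* spec) { return spec->size; }

    struct SpecA { int size; };
    struct SpecB { int size; };

    // ...plus explicit instantiations. Code is emitted for exactly these
    // two; using any other Spec from another file fails at link time.
    template int Factory::Make<SpecA>(const SpecA*);
    template int Factory::Make<SpecB>(const SpecB*);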
@@ -2024,14 +2040,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
shared->set_scope_info(*scope_info);
shared->set_feedback_vector(*feedback_vector);
shared->set_kind(kind);
- int literals_array_size = number_of_literals;
- // If the function contains object, regexp or array literals,
- // allocate extra space for a literals array prefix containing the
- // context.
- if (number_of_literals > 0) {
- literals_array_size += JSFunction::kLiteralsPrefixSize;
- }
- shared->set_num_literals(literals_array_size);
+ shared->set_num_literals(number_of_literals);
if (IsGeneratorFunction(kind)) {
shared->set_instance_class_name(isolate()->heap()->Generator_string());
shared->DisableOptimization(kGenerator);
@@ -2066,8 +2075,8 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
Handle<String> name,
MaybeHandle<Code> maybe_code) {
Handle<Map> map = shared_function_info_map();
- Handle<SharedFunctionInfo> share = New<SharedFunctionInfo>(map,
- OLD_POINTER_SPACE);
+ Handle<SharedFunctionInfo> share =
+ New<SharedFunctionInfo>(map, OLD_POINTER_SPACE);
// Set pointer fields.
share->set_name(*name);
@@ -2086,9 +2095,9 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
- FeedbackVectorSpec empty_spec;
+ FeedbackVectorSpec empty_spec(0);
Handle<TypeFeedbackVector> feedback_vector =
- NewTypeFeedbackVector(empty_spec);
+ NewTypeFeedbackVector(&empty_spec);
share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
#if TRACE_MAPS
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 4dfd98c61c..0bb883d72b 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -196,6 +196,14 @@ class Factory FINAL {
// Create a new cons string object which consists of a pair of strings.
MUST_USE_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
Handle<String> right);
+ MUST_USE_RESULT MaybeHandle<String> NewOneByteConsString(
+ int length, Handle<String> left, Handle<String> right);
+ MUST_USE_RESULT MaybeHandle<String> NewTwoByteConsString(
+ int length, Handle<String> left, Handle<String> right);
+ MUST_USE_RESULT MaybeHandle<String> NewRawConsString(Handle<Map> map,
+ int length,
+ Handle<String> left,
+ Handle<String> right);
// Create a new string object which holds a proper substring of a string.
Handle<String> NewProperSubString(Handle<String> str,
@@ -292,9 +300,7 @@ class Factory FINAL {
Handle<Cell> NewCell(Handle<Object> value);
- Handle<PropertyCell> NewPropertyCellWithHole();
-
- Handle<PropertyCell> NewPropertyCell(Handle<Object> value);
+ Handle<PropertyCell> NewPropertyCell();
Handle<WeakCell> NewWeakCell(Handle<HeapObject> value);
@@ -519,40 +525,38 @@ class Factory FINAL {
// Interface for creating error objects.
- MaybeHandle<Object> NewError(const char* maker, const char* message,
- Handle<JSArray> args);
+ Handle<Object> NewError(const char* maker, const char* message,
+ Handle<JSArray> args);
Handle<String> EmergencyNewError(const char* message, Handle<JSArray> args);
- MaybeHandle<Object> NewError(const char* maker, const char* message,
- Vector<Handle<Object> > args);
- MaybeHandle<Object> NewError(const char* message,
- Vector<Handle<Object> > args);
- MaybeHandle<Object> NewError(Handle<String> message);
- MaybeHandle<Object> NewError(const char* constructor, Handle<String> message);
+ Handle<Object> NewError(const char* maker, const char* message,
+ Vector<Handle<Object> > args);
+ Handle<Object> NewError(const char* message, Vector<Handle<Object> > args);
+ Handle<Object> NewError(Handle<String> message);
+ Handle<Object> NewError(const char* constructor, Handle<String> message);
- MaybeHandle<Object> NewTypeError(const char* message,
- Vector<Handle<Object> > args);
- MaybeHandle<Object> NewTypeError(Handle<String> message);
+ Handle<Object> NewTypeError(const char* message,
+ Vector<Handle<Object> > args);
+ Handle<Object> NewTypeError(Handle<String> message);
- MaybeHandle<Object> NewRangeError(const char* message,
- Vector<Handle<Object> > args);
- MaybeHandle<Object> NewRangeError(Handle<String> message);
+ Handle<Object> NewRangeError(const char* message,
+ Vector<Handle<Object> > args);
+ Handle<Object> NewRangeError(Handle<String> message);
- MaybeHandle<Object> NewInvalidStringLengthError() {
+ Handle<Object> NewInvalidStringLengthError() {
return NewRangeError("invalid_string_length",
HandleVector<Object>(NULL, 0));
}
- MaybeHandle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
- MaybeHandle<Object> NewSyntaxError(Handle<String> message);
-
- MaybeHandle<Object> NewReferenceError(const char* message,
- Vector<Handle<Object> > args);
- MaybeHandle<Object> NewReferenceError(const char* message,
- Handle<JSArray> args);
- MaybeHandle<Object> NewReferenceError(Handle<String> message);
+ Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
+ Handle<Object> NewSyntaxError(Handle<String> message);
- MaybeHandle<Object> NewEvalError(const char* message,
+ Handle<Object> NewReferenceError(const char* message,
Vector<Handle<Object> > args);
+ Handle<Object> NewReferenceError(const char* message, Handle<JSArray> args);
+ Handle<Object> NewReferenceError(Handle<String> message);
+
+ Handle<Object> NewEvalError(const char* message,
+ Vector<Handle<Object> > args);
Handle<String> NumberToString(Handle<Object> number,
bool check_number_string_cache = true);
@@ -620,8 +624,8 @@ class Factory FINAL {
MaybeHandle<Code> code);
// Allocate a new type feedback vector
- Handle<TypeFeedbackVector> NewTypeFeedbackVector(
- const FeedbackVectorSpec& spec);
+ template <typename Spec>
+ Handle<TypeFeedbackVector> NewTypeFeedbackVector(const Spec* spec);
// Allocates a new JSMessageObject object.
Handle<JSMessageObject> NewJSMessageObject(
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 46d8aa94de..f1bdc0c290 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -182,32 +182,29 @@ DEFINE_IMPLICATION(harmony, es_staging)
DEFINE_IMPLICATION(es_staging, harmony)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
- V(harmony_modules, "harmony modules (implies block scoping)") \
- V(harmony_arrays, "harmony array methods") \
- V(harmony_array_includes, "harmony Array.prototype.includes") \
- V(harmony_regexps, "harmony regular expression extensions") \
- V(harmony_arrow_functions, "harmony arrow functions") \
- V(harmony_proxies, "harmony proxies") \
- V(harmony_sloppy, "harmony features in sloppy mode") \
- V(harmony_unicode, "harmony unicode escapes") \
- V(harmony_unicode_regexps, "harmony unicode regexps") \
- V(harmony_computed_property_names, "harmony computed property names") \
- V(harmony_rest_parameters, "harmony rest parameters") \
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_modules, "harmony modules") \
+ V(harmony_arrays, "harmony array methods") \
+ V(harmony_array_includes, "harmony Array.prototype.includes") \
+ V(harmony_regexps, "harmony regular expression extensions") \
+ V(harmony_arrow_functions, "harmony arrow functions") \
+ V(harmony_proxies, "harmony proxies") \
+ V(harmony_sloppy, "harmony features in sloppy mode") \
+ V(harmony_unicode, "harmony unicode escapes") \
+ V(harmony_unicode_regexps, "harmony unicode regexps") \
+ V(harmony_rest_parameters, "harmony rest parameters") \
+ V(harmony_reflect, "harmony Reflect API")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_tostring, "harmony toString") \
+#define HARMONY_STAGED(V) \
+ V(harmony_computed_property_names, "harmony computed property names") \
+ V(harmony_tostring, "harmony toString")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
- V(harmony_numeric_literals, "harmony numeric literals") \
- V(harmony_strings, "harmony string methods") \
- V(harmony_scoping, "harmony block scoping") \
- V(harmony_templates, "harmony template literals") \
- V(harmony_classes, \
- "harmony classes (implies block scoping & object literal extension)") \
- V(harmony_object_literals, "harmony object literal extensions") \
+#define HARMONY_SHIPPING(V) \
+ V(harmony_numeric_literals, "harmony numeric literals") \
+ V(harmony_classes, "harmony classes (implies object literal extension)") \
+ V(harmony_object_literals, "harmony object literal extensions")
// Once a shipping feature has proved stable in the wild, it will be dropped
// from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -234,8 +231,6 @@ HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
// Feature dependencies.
-DEFINE_IMPLICATION(harmony_modules, harmony_scoping)
-DEFINE_IMPLICATION(harmony_classes, harmony_scoping)
DEFINE_IMPLICATION(harmony_classes, harmony_object_literals)
DEFINE_IMPLICATION(harmony_unicode_regexps, harmony_unicode)
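DEFINE_IMPLICATION belongs to this header's X-macro design: flag-definitions.h is re-included under different FLAG_MODE_* settings, and in the implications mode each DEFINE_IMPLICATION(a, b) expands to code that forces FLAG_b on whenever FLAG_a is set (see EnforceFlagImplications in flags.cc further down). A self-contained sketch of the idea, using a parameterized list macro instead of V8's re-included header:

    // The flag list takes two visitor macros so it can be expanded in
    // several modes from one definition.
    #define FLAG_LIST(FLAG, IMPLY)       \
      FLAG(bool, classes, false)         \
      FLAG(bool, object_literals, false) \
      IMPLY(classes, object_literals)

    // Mode 1: define storage for each flag.
    #define DEF_FLAG(type, name, def) type FLAG_##name = def;
    #define NO_IMPLY(a, b)
    FLAG_LIST(DEF_FLAG, NO_IMPLY)
    #undef DEF_FLAG
    #undef NO_IMPLY

    // Mode 2: enforce implications.
    #define NO_FLAG(type, name, def)
    #define DO_IMPLY(a, b) \
      if (FLAG_##a) FLAG_##b = true;
    void EnforceImplications() { FLAG_LIST(NO_FLAG, DO_IMPLY) }
    #undef NO_FLAG
    #undef DO_IMPLY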
@@ -301,6 +296,8 @@ DEFINE_BOOL(collect_megamorphic_maps_from_stub_cache, true,
"crankshaft harvests type feedback from stub cache")
DEFINE_BOOL(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_BOOL(trace_check_elimination, false, "trace check elimination phase")
+DEFINE_BOOL(trace_environment_liveness, false,
+ "trace liveness of local variable slots")
DEFINE_BOOL(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_STRING(trace_hydrogen_filter, "*", "hydrogen tracing filter")
DEFINE_BOOL(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
@@ -367,10 +364,9 @@ DEFINE_BOOL(optimize_for_in, true, "optimize functions containing for-in loops")
DEFINE_BOOL(concurrent_recompilation, true,
"optimizing hot functions asynchronously on a separate thread")
-DEFINE_BOOL(job_based_recompilation, false,
+DEFINE_BOOL(job_based_recompilation, true,
"post tasks to v8::Platform instead of using a thread for "
"concurrent recompilation")
-DEFINE_IMPLICATION(job_based_recompilation, concurrent_recompilation)
DEFINE_BOOL(trace_concurrent_recompilation, false,
"track concurrent recompilation")
DEFINE_INT(concurrent_recompilation_queue_length, 8,
@@ -402,6 +398,7 @@ DEFINE_BOOL(turbo_verify, DEBUG_BOOL, "verify TurboFan graphs at each phase")
DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
DEFINE_BOOL(turbo_splitting, true, "split nodes during scheduling in TurboFan")
DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
+DEFINE_BOOL(turbo_type_feedback, false, "use type feedback in TurboFan")
DEFINE_BOOL(turbo_source_positions, false,
"track source code positions when building TurboFan IR")
DEFINE_IMPLICATION(trace_turbo, turbo_source_positions)
@@ -409,9 +406,9 @@ DEFINE_BOOL(context_specialization, false,
"enable context specialization in TurboFan")
DEFINE_BOOL(turbo_deoptimization, false, "enable deoptimization in TurboFan")
DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
+DEFINE_BOOL(turbo_builtin_inlining, true, "enable builtin inlining in TurboFan")
DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
-DEFINE_IMPLICATION(turbo_inlining, turbo_types)
DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
// TODO(dcarney): this is just for experimentation, remove when default.
DEFINE_BOOL(turbo_delay_ssa_decon, false,
@@ -420,11 +417,11 @@ DEFINE_BOOL(turbo_verify_allocation, DEBUG_BOOL,
"verify register allocation in TurboFan")
DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
DEFINE_BOOL(turbo_jt, true, "enable jump threading in TurboFan")
-DEFINE_BOOL(turbo_osr, false, "enable OSR in TurboFan")
+DEFINE_BOOL(turbo_osr, true, "enable OSR in TurboFan")
DEFINE_BOOL(turbo_exceptions, false, "enable exception handling in TurboFan")
DEFINE_BOOL(turbo_stress_loop_peeling, false,
"stress loop peeling optimization")
-DEFINE_BOOL(turbo_switch, true, "optimize switches in TurboFan")
+DEFINE_BOOL(turbo_cf_optimization, true, "optimize control flow in TurboFan")
DEFINE_INT(typed_array_max_size_in_heap, 64,
"threshold for in-heap typed array")
@@ -578,6 +575,8 @@ DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
+DEFINE_INT(retain_maps_for_n_gc, 2,
+ "keeps maps alive for <n> old space garbage collections")
DEFINE_BOOL(trace_gc, false,
"print one trace line following each garbage collection")
DEFINE_BOOL(trace_gc_nvp, false,
@@ -596,8 +595,9 @@ DEFINE_BOOL(print_max_heap_committed, false,
"in name=value format on exit")
DEFINE_BOOL(trace_gc_verbose, false,
"print more details following each garbage collection")
-DEFINE_BOOL(trace_fragmentation, false,
- "report fragmentation for old pointer and data pages")
+DEFINE_BOOL(trace_fragmentation, false, "report fragmentation for old space")
+DEFINE_BOOL(trace_fragmentation_verbose, false,
+ "report fragmentation for old space (detailed)")
DEFINE_BOOL(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
@@ -614,8 +614,13 @@ DEFINE_BOOL(age_code, true,
"old code (required for code flushing)")
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_steps, true, "do incremental marking steps")
-DEFINE_BOOL(overapproximate_weak_closure, false,
+DEFINE_BOOL(overapproximate_weak_closure, true,
"overapproximate weak closer to reduce atomic pause time")
+DEFINE_INT(min_progress_during_object_groups_marking, 128,
+ "keep overapproximating the weak closure as long as we discover at "
+ "least this many unmarked objects")
+DEFINE_INT(max_object_groups_marking_rounds, 3,
+ "at most try this many times to over approximate the weak closure")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
@@ -753,6 +758,19 @@ DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
+// mark-compact.cc
+DEFINE_BOOL(force_marking_deque_overflows, false,
+ "force overflows of marking deque by reducing it's size "
+ "to 64 words")
+
+DEFINE_BOOL(stress_compaction, false,
+ "stress the GC compactor to flush out bugs (implies "
+ "--force_marking_deque_overflows)")
+
+DEFINE_BOOL(manual_evacuation_candidates_selection, false,
+ "Test mode only flag. It allows an unit test to select evacuation "
+ "candidates pages (requires --stress_compaction).")
+
//
// Dev shell flags
@@ -770,21 +788,24 @@ DEFINE_ARGS(js_arguments,
//
// GDB JIT integration flags.
//
+#undef FLAG
+#ifdef ENABLE_GDB_JIT_INTERFACE
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
-DEFINE_BOOL(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
+DEFINE_BOOL(gdbjit, false, "enable GDBJIT interface")
DEFINE_BOOL(gdbjit_full, false, "enable GDBJIT interface for all code objects")
DEFINE_BOOL(gdbjit_dump, false, "dump elf objects with debug info to disk")
DEFINE_STRING(gdbjit_dump_filter, "",
"dump only objects containing this substring")
-// mark-compact.cc
-DEFINE_BOOL(force_marking_deque_overflows, false,
- "force overflows of marking deque by reducing it's size "
- "to 64 words")
-
-DEFINE_BOOL(stress_compaction, false,
- "stress the GC compactor to flush out bugs (implies "
- "--force_marking_deque_overflows)")
+#ifdef ENABLE_GDB_JIT_INTERFACE
+DEFINE_IMPLICATION(gdbjit_full, gdbjit)
+DEFINE_IMPLICATION(gdbjit_dump, gdbjit)
+#endif
+DEFINE_NEG_IMPLICATION(gdbjit, compact_code_space)
//
// Debug only flags
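The relocated GDBJIT block shows the conditional-flag idiom used throughout this file: FLAG is re-pointed at FLAG_FULL or FLAG_READONLY depending on whether the feature is compiled in, so the same DEFINE_BOOL lines yield either a mutable flag or a constant. A sketch of the two expansions (V8's real macros take more modes and parameters):

    // A writable flag gets storage; a read-only one collapses to a
    // compile-time constant the optimizer can fold away.
    #define FLAG_FULL(type, name, def, cmt) type FLAG_##name = def;
    #define FLAG_READONLY(type, name, def, cmt) \
      static const type FLAG_##name = def;

    #ifdef ENABLE_FEATURE
    #define FLAG FLAG_FULL
    #else
    #define FLAG FLAG_READONLY
    #endif

    FLAG(bool, feature_flag, false, "enable the feature")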
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index d498c28240..7386238090 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -543,15 +543,10 @@ void FlagList::PrintHelp() {
}
-// static
-void FlagList::EnforceFlagImplications() {
-#define FLAG_MODE_DEFINE_IMPLICATIONS
-#include "src/flag-definitions.h"
-#undef FLAG_MODE_DEFINE_IMPLICATIONS
-}
+static uint32_t flag_hash = 0;
-uint32_t FlagList::Hash() {
+void ComputeFlagListHash() {
std::ostringstream modified_args_as_string;
#ifdef DEBUG
modified_args_as_string << "debug";
@@ -564,7 +559,19 @@ uint32_t FlagList::Hash() {
}
}
std::string args(modified_args_as_string.str());
- return static_cast<uint32_t>(
+ flag_hash = static_cast<uint32_t>(
base::hash_range(args.c_str(), args.c_str() + args.length()));
}
+
+
+// static
+void FlagList::EnforceFlagImplications() {
+#define FLAG_MODE_DEFINE_IMPLICATIONS
+#include "src/flag-definitions.h"
+#undef FLAG_MODE_DEFINE_IMPLICATIONS
+ ComputeFlagListHash();
+}
+
+
+uint32_t FlagList::Hash() { return flag_hash; }
} } // namespace v8::internal
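
The flags.cc hunk changes Hash() from compute-on-demand to a value computed once, at the end of EnforceFlagImplications(), and cached in a file-static. A small sketch of that compute-once pattern, with std::hash standing in for base::hash_range and the modified-argument list supplied by the caller:

    // flag_hash_sketch.cc -- compute the flag hash once at initialization and
    // serve cached reads afterwards; std::hash stands in for base::hash_range.
    #include <cstdint>
    #include <cstdio>
    #include <functional>
    #include <sstream>
    #include <string>
    #include <vector>

    static uint32_t flag_hash = 0;  // written once during init, then read-only

    static void ComputeFlagListHash(const std::vector<std::string>& modified) {
      std::ostringstream modified_args_as_string;
    #ifdef DEBUG
      modified_args_as_string << "debug";  // debug/release must never collide
    #endif
      for (const std::string& flag : modified) modified_args_as_string << flag;
      const std::string args(modified_args_as_string.str());
      flag_hash = static_cast<uint32_t>(std::hash<std::string>{}(args));
    }

    static void EnforceFlagImplications(
        const std::vector<std::string>& modified) {
      // ...apply implications first, so the hash reflects final flag values...
      ComputeFlagListHash(modified);
    }

    static uint32_t FlagListHash() { return flag_hash; }  // no recomputation

    int main() {
      EnforceFlagImplications({"--expose_gc", "--trace_gc"});
      std::printf("hash = %u\n", FlagListHash());
    }
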
diff --git a/deps/v8/src/flags.h b/deps/v8/src/flags.h
index 9ec5d30119..545c172d36 100644
--- a/deps/v8/src/flags.h
+++ b/deps/v8/src/flags.h
@@ -58,7 +58,8 @@ class FlagList {
// Set flags as consequence of being implied by another flag.
static void EnforceFlagImplications();
- // Hash of current flags (to quickly determine flag changes).
+ // Hash of flags (to quickly determine mismatching flag expectations).
+ // This hash is calculated during V8::Initialize and cached.
static uint32_t Hash();
};
diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h
index 824c1a7620..650d6f9725 100644
--- a/deps/v8/src/frames-inl.h
+++ b/deps/v8/src/frames-inl.h
@@ -44,63 +44,11 @@ inline StackHandler* StackHandler::next() const {
}
-inline bool StackHandler::includes(Address address) const {
- Address start = this->address();
- Address end = start + StackHandlerConstants::kSize;
- return start <= address && address <= end;
-}
-
-
-inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
- v->VisitPointer(context_address());
- v->VisitPointer(code_address());
-}
-
-
inline StackHandler* StackHandler::FromAddress(Address address) {
return reinterpret_cast<StackHandler*>(address);
}
-inline bool StackHandler::is_js_entry() const {
- return kind() == JS_ENTRY;
-}
-
-
-inline bool StackHandler::is_catch() const {
- return kind() == CATCH;
-}
-
-
-inline bool StackHandler::is_finally() const {
- return kind() == FINALLY;
-}
-
-
-inline StackHandler::Kind StackHandler::kind() const {
- const int offset = StackHandlerConstants::kStateIntOffset;
- return KindField::decode(Memory::unsigned_at(address() + offset));
-}
-
-
-inline unsigned StackHandler::index() const {
- const int offset = StackHandlerConstants::kStateIntOffset;
- return IndexField::decode(Memory::unsigned_at(address() + offset));
-}
-
-
-inline Object** StackHandler::context_address() const {
- const int offset = StackHandlerConstants::kContextOffset;
- return reinterpret_cast<Object**>(address() + offset);
-}
-
-
-inline Object** StackHandler::code_address() const {
- const int offset = StackHandlerConstants::kCodeOffset;
- return reinterpret_cast<Object**>(address() + offset);
-}
-
-
inline StackFrame::StackFrame(StackFrameIteratorBase* iterator)
: iterator_(iterator), isolate_(iterator_->isolate()) {
}
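
With kind(), index(), and the context/code accessors deleted, a StackHandler degenerates to a single next pointer threaded through the stack. A tiny standalone sketch of what traversal of such a chain amounts to (the struct here is an illustrative stand-in, not V8's class):

    // handler_chain_sketch.cc -- after this patch a stack handler is just a
    // link in a singly linked list threaded through the stack.
    #include <cstdio>

    struct StackHandler {
      StackHandler* next;  // the only remaining slot (kNextOffset)
    };

    static int HandlerDepth(const StackHandler* top) {
      int depth = 0;
      for (const StackHandler* h = top; h != nullptr; h = h->next) ++depth;
      return depth;
    }

    int main() {
      StackHandler outer{nullptr};
      StackHandler inner{&outer};  // an inner try nested inside an outer one
      std::printf("handlers on stack: %d\n", HandlerDepth(&inner));  // 2
    }
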
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index b7fba653d5..f52b3ce4e4 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -380,12 +380,6 @@ Code* StackFrame::GetSafepointData(Isolate* isolate,
}
-bool StackFrame::HasHandler() const {
- StackHandlerIterator it(this, top_handler());
- return !it.done();
-}
-
-
#ifdef DEBUG
static bool GcSafeCodeContains(HeapObject* object, Address addr);
#endif
@@ -608,15 +602,6 @@ void StandardFrame::SetCallerFp(Address caller_fp) {
}
-bool StandardFrame::IsExpressionInsideHandler(int n) const {
- Address address = GetExpressionAddress(n);
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- if (it.handler()->includes(address)) return true;
- }
- return false;
-}
-
-
void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
@@ -710,12 +695,6 @@ int StubFrame::GetNumberOfIncomingArguments() const {
void OptimizedFrame::Iterate(ObjectVisitor* v) const {
-#ifdef DEBUG
- // Make sure that optimized frames do not contain any stack handlers.
- StackHandlerIterator it(this, top_handler());
- DCHECK(it.done());
-#endif
-
IterateCompiledFrame(v);
}
@@ -782,6 +761,15 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
}
+int JavaScriptFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+ Code* code = LookupCode();
+ DCHECK(!code->is_optimized_code());
+ HandlerTable* table = HandlerTable::cast(code->handler_table());
+ int pc_offset = static_cast<int>(pc() - code->entry());
+ return table->LookupRange(pc_offset, stack_slots);
+}
+
+
void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
Address pc, FILE* file,
bool print_line_number) {
@@ -843,66 +831,19 @@ void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
}
-void JavaScriptFrame::SaveOperandStack(FixedArray* store,
- int* stack_handler_index) const {
+void JavaScriptFrame::SaveOperandStack(FixedArray* store) const {
int operands_count = store->length();
DCHECK_LE(operands_count, ComputeOperandsCount());
-
- // Visit the stack in LIFO order, saving operands and stack handlers into the
- // array. The saved stack handlers store a link to the next stack handler,
- // which will allow RestoreOperandStack to rewind the handlers.
- StackHandlerIterator it(this, top_handler());
- int i = operands_count - 1;
- *stack_handler_index = -1;
- for (; !it.done(); it.Advance()) {
- StackHandler* handler = it.handler();
- // Save operands pushed after the handler was pushed.
- for (; GetOperandSlot(i) < handler->address(); i--) {
- store->set(i, GetOperand(i));
- }
- DCHECK_GE(i + 1, StackHandlerConstants::kSlotCount);
- DCHECK_EQ(handler->address(), GetOperandSlot(i));
- int next_stack_handler_index = i + 1 - StackHandlerConstants::kSlotCount;
- handler->Unwind(isolate(), store, next_stack_handler_index,
- *stack_handler_index);
- *stack_handler_index = next_stack_handler_index;
- i -= StackHandlerConstants::kSlotCount;
- }
-
- // Save any remaining operands.
- for (; i >= 0; i--) {
+ for (int i = 0; i < operands_count; i++) {
store->set(i, GetOperand(i));
}
}
-void JavaScriptFrame::RestoreOperandStack(FixedArray* store,
- int stack_handler_index) {
+void JavaScriptFrame::RestoreOperandStack(FixedArray* store) {
int operands_count = store->length();
DCHECK_LE(operands_count, ComputeOperandsCount());
- int i = 0;
- while (i <= stack_handler_index) {
- if (i < stack_handler_index) {
- // An operand.
- DCHECK_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
- Memory::Object_at(GetOperandSlot(i)) = store->get(i);
- i++;
- } else {
- // A stack handler.
- DCHECK_EQ(i, stack_handler_index);
- // The FixedArray store grows up. The stack grows down. So the operand
- // slot for i actually points to the bottom of the top word in the
- // handler. The base of the StackHandler* is the address of the bottom
- // word, which will be the last slot that is in the handler.
- int handler_slot_index = i + StackHandlerConstants::kSlotCount - 1;
- StackHandler *handler =
- StackHandler::FromAddress(GetOperandSlot(handler_slot_index));
- stack_handler_index = handler->Rewind(isolate(), store, i, fp());
- i += StackHandlerConstants::kSlotCount;
- }
- }
-
- for (; i < operands_count; i++) {
+ for (int i = 0; i < operands_count; i++) {
DCHECK_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
Memory::Object_at(GetOperandSlot(i)) = store->get(i);
}
@@ -1035,6 +976,16 @@ void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
}
+int OptimizedFrame::LookupExceptionHandlerInTable(int* stack_slots) {
+ Code* code = LookupCode();
+ DCHECK(code->is_optimized_code());
+ HandlerTable* table = HandlerTable::cast(code->handler_table());
+ int pc_offset = static_cast<int>(pc() - code->entry());
+ *stack_slots = code->stack_slots();
+ return table->LookupReturn(pc_offset);
+}
+
+
DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
int* deopt_index) {
DCHECK(is_optimized());
@@ -1286,7 +1237,6 @@ void JavaScriptFrame::Print(StringStream* accumulator,
accumulator->Add(" // expression stack (top to bottom)\n");
}
for (int i = expressions_count - 1; i >= expressions_start; i--) {
- if (IsExpressionInsideHandler(i)) continue;
accumulator->Add(" [%02d] : %o\n", i, GetExpression(i));
}
@@ -1335,17 +1285,6 @@ void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
void EntryFrame::Iterate(ObjectVisitor* v) const {
- StackHandlerIterator it(this, top_handler());
- DCHECK(!it.done());
- StackHandler* handler = it.handler();
- DCHECK(handler->is_js_entry());
- handler->Iterate(v, LookupCode());
-#ifdef DEBUG
- // Make sure that the entry frame does not contain more than one
- // stack handler.
- it.Advance();
- DCHECK(it.done());
-#endif
IteratePc(v, pc_address(), LookupCode());
}
@@ -1354,17 +1293,6 @@ void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
const int offset = StandardFrameConstants::kLastObjectOffset;
Object** base = &Memory::Object_at(sp());
Object** limit = &Memory::Object_at(fp() + offset) + 1;
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- StackHandler* handler = it.handler();
- // Traverse pointers down to - but not including - the next
- // handler in the handler chain. Update the base to skip the
- // handler and allow the handler to traverse its own pointers.
- const Address address = handler->address();
- v->VisitPointers(base, reinterpret_cast<Object**>(address));
- base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
- // Traverse the pointers in the handler itself.
- handler->Iterate(v, LookupCode());
- }
v->VisitPointers(base, limit);
}
@@ -1530,59 +1458,6 @@ InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
// -------------------------------------------------------------------------
-void StackHandler::Unwind(Isolate* isolate,
- FixedArray* array,
- int offset,
- int previous_handler_offset) const {
- STATIC_ASSERT(StackHandlerConstants::kSlotCount >= 5);
- DCHECK_LE(0, offset);
- DCHECK_GE(array->length(), offset + StackHandlerConstants::kSlotCount);
- // Unwinding a stack handler into an array chains it in the opposite
- // direction, re-using the "next" slot as a "previous" link, so that stack
- // handlers can be later re-wound in the correct order. Decode the "state"
- // slot into "index" and "kind" and store them separately, using the fp slot.
- array->set(offset, Smi::FromInt(previous_handler_offset)); // next
- array->set(offset + 1, *code_address()); // code
- array->set(offset + 2, Smi::FromInt(static_cast<int>(index()))); // state
- array->set(offset + 3, *context_address()); // context
- array->set(offset + 4, Smi::FromInt(static_cast<int>(kind()))); // fp
-
- *isolate->handler_address() = next()->address();
-}
-
-
-int StackHandler::Rewind(Isolate* isolate,
- FixedArray* array,
- int offset,
- Address fp) {
- STATIC_ASSERT(StackHandlerConstants::kSlotCount >= 5);
- DCHECK_LE(0, offset);
- DCHECK_GE(array->length(), offset + StackHandlerConstants::kSlotCount);
- Smi* prev_handler_offset = Smi::cast(array->get(offset));
- Code* code = Code::cast(array->get(offset + 1));
- Smi* smi_index = Smi::cast(array->get(offset + 2));
- Object* context = array->get(offset + 3);
- Smi* smi_kind = Smi::cast(array->get(offset + 4));
-
- unsigned state = KindField::encode(static_cast<Kind>(smi_kind->value())) |
- IndexField::encode(static_cast<unsigned>(smi_index->value()));
-
- Memory::Address_at(address() + StackHandlerConstants::kNextOffset) =
- *isolate->handler_address();
- Memory::Object_at(address() + StackHandlerConstants::kCodeOffset) = code;
- Memory::uintptr_at(address() + StackHandlerConstants::kStateOffset) = state;
- Memory::Object_at(address() + StackHandlerConstants::kContextOffset) =
- context;
- SetFp(address() + StackHandlerConstants::kFPOffset, fp);
-
- *isolate->handler_address() = address();
-
- return prev_handler_offset->value();
-}
-
-
-// -------------------------------------------------------------------------
-
int NumRegs(RegList reglist) { return base::bits::CountPopulation32(reglist); }
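
Both new LookupExceptionHandlerInTable methods translate the frame's pc into an offset inside the code object and consult a side table instead of walking on-stack handlers: unoptimized code uses a range lookup, optimized code a return-address lookup. Below is a sketch of the range variant; the (start, end, handler, depth) layout is illustrative, not V8's HandlerTable encoding.

    // handler_table_sketch.cc -- range-based handler lookup in the spirit of
    // JavaScriptFrame::LookupExceptionHandlerInTable; the table layout here
    // is an illustrative stand-in.
    #include <climits>
    #include <cstdio>
    #include <vector>

    struct HandlerRange {
      int start;    // pc-offset where the try range begins
      int end;      // pc-offset where it ends (exclusive)
      int handler;  // pc-offset of the handler entry
      int depth;    // expected operand stack depth at the handler
    };

    // Returns the handler pc-offset of the innermost range containing
    // pc_offset, or -1 if none, reporting the expected depth like the patch
    // does through its out-parameter.
    static int LookupRange(const std::vector<HandlerRange>& table,
                           int pc_offset, int* depth_out) {
      int best = -1, best_span = INT_MAX;
      for (const HandlerRange& r : table) {
        if (pc_offset < r.start || pc_offset >= r.end) continue;
        int span = r.end - r.start;
        if (span < best_span) {  // prefer the innermost enclosing try range
          best_span = span;
          best = r.handler;
          *depth_out = r.depth;
        }
      }
      return best;  // -1 if the pc is covered by no try range
    }

    int main() {
      std::vector<HandlerRange> table = {{10, 90, 100, 0}, {20, 40, 120, 1}};
      int depth = 0;
      int handler = LookupRange(table, 25, &depth);
      std::printf("handler=%d depth=%d\n", handler, depth);  // 120, 1
    }
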
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 03d53dd6a1..397c7b5db9 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -66,74 +66,34 @@ class InnerPointerToCodeCache {
};
+// Every try-block pushes the context register.
+class TryBlockConstant : public AllStatic {
+ public:
+ static const int kElementCount = 1;
+};
+
+
class StackHandlerConstants : public AllStatic {
public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
-#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
- static const int kStateIntOffset = kStateOffset;
-#else
- static const int kStateIntOffset = kStateOffset + kIntSize;
-#endif
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
+ static const int kNextOffset = 0 * kPointerSize;
- static const int kSize = kFPOffset + kFPOnStackSize;
+ static const int kSize = kNextOffset + kPointerSize;
static const int kSlotCount = kSize >> kPointerSizeLog2;
};
class StackHandler BASE_EMBEDDED {
public:
- enum Kind {
- JS_ENTRY,
- CATCH,
- FINALLY,
- LAST_KIND = FINALLY
- };
-
- static const int kKindWidth = 2;
- STATIC_ASSERT(LAST_KIND < (1 << kKindWidth));
- static const int kIndexWidth = 32 - kKindWidth;
- class KindField: public BitField<StackHandler::Kind, 0, kKindWidth> {};
- class IndexField: public BitField<unsigned, kKindWidth, kIndexWidth> {};
-
// Get the address of this stack handler.
inline Address address() const;
// Get the next stack handler in the chain.
inline StackHandler* next() const;
- // Tells whether the given address is inside this handler.
- inline bool includes(Address address) const;
-
- // Garbage collection support.
- inline void Iterate(ObjectVisitor* v, Code* holder) const;
-
// Conversion support.
static inline StackHandler* FromAddress(Address address);
- // Testers
- inline bool is_js_entry() const;
- inline bool is_catch() const;
- inline bool is_finally() const;
-
- // Generator support to preserve stack handlers.
- void Unwind(Isolate* isolate, FixedArray* array, int offset,
- int previous_handler_offset) const;
- int Rewind(Isolate* isolate, FixedArray* array, int offset, Address fp);
-
private:
- // Accessors.
- inline Kind kind() const;
- inline unsigned index() const;
-
- inline Object** constant_pool_address() const;
- inline Object** context_address() const;
- inline Object** code_address() const;
- inline void SetFp(Address slot, Address fp);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
@@ -273,8 +233,8 @@ class StackFrame BASE_EMBEDDED {
// Get the id of this stack frame.
Id id() const { return static_cast<Id>(OffsetFrom(caller_sp())); }
- // Checks if this frame includes any stack handlers.
- bool HasHandler() const;
+ // Get the top handler from the current stack iterator.
+ inline StackHandler* top_handler() const;
// Get the type of this frame.
virtual Type type() const = 0;
@@ -309,7 +269,6 @@ class StackFrame BASE_EMBEDDED {
// Resolves pc_address through the resolution address function if one is set.
static inline Address* ResolveReturnAddressLocation(Address* pc_address);
-
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
virtual void Print(StringStream* accumulator,
@@ -330,9 +289,6 @@ class StackFrame BASE_EMBEDDED {
PrintMode mode,
int index);
- // Get the top handler from the current stack iterator.
- inline StackHandler* top_handler() const;
-
// Compute the stack frame type for the given state.
static Type ComputeType(const StackFrameIteratorBase* iterator, State* state);
@@ -501,10 +457,6 @@ class StandardFrame: public StackFrame {
Address GetExpressionAddress(int n) const;
static Address GetExpressionAddress(Address fp, int n);
- // Determines if the n'th expression stack element is in a stack
- // handler or not. Requires traversing all handlers in this frame.
- bool IsExpressionInsideHandler(int n) const;
-
// Determines if the standard frame for the given frame pointer is
// an arguments adaptor frame.
static inline bool IsArgumentsAdaptorFrame(Address fp);
@@ -573,9 +525,9 @@ class JavaScriptFrame: public StandardFrame {
inline Object* GetOperand(int index) const;
inline int ComputeOperandsCount() const;
- // Generator support to preserve operand stack and stack handlers.
- void SaveOperandStack(FixedArray* store, int* stack_handler_index) const;
- void RestoreOperandStack(FixedArray* store, int stack_handler_index);
+ // Generator support to preserve operand stack.
+ void SaveOperandStack(FixedArray* store) const;
+ void RestoreOperandStack(FixedArray* store);
// Debugger access.
void SetParameterValue(int index, Object* value) const;
@@ -609,6 +561,10 @@ class JavaScriptFrame: public StandardFrame {
// Build a list with summaries for this frame including all inlined frames.
virtual void Summarize(List<FrameSummary>* frames);
+ // Lookup exception handler for current {pc}, returns -1 if none found. Also
+ // returns the expected number of stack slots at the handler site.
+ virtual int LookupExceptionHandlerInTable(int* stack_slots);
+
// Architecture-specific register description.
static Register fp_register();
static Register context_register();
@@ -681,6 +637,10 @@ class OptimizedFrame : public JavaScriptFrame {
virtual void Summarize(List<FrameSummary>* frames);
+ // Lookup exception handler for current {pc}, returns -1 if none found. Also
+ // returns the expected number of stack slots at the handler site.
+ virtual int LookupExceptionHandlerInTable(int* stack_slots);
+
DeoptimizationInputData* GetDeoptimizationData(int* deopt_index);
protected:
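
The header shrinks StackHandlerConstants from five slots to the single next slot and introduces TryBlockConstant for the one context word each try block now pushes. Restated standalone (kPointerSize assumed to be sizeof(void*)):

    // constants_sketch.cc -- the shrunken handler layout from frames.h,
    // restated as a self-contained snippet.
    #include <cstdio>

    static const int kPointerSize = sizeof(void*);
    static const int kNextOffset = 0 * kPointerSize;
    static const int kSize = kNextOffset + kPointerSize;  // one slot: "next"
    static const int kSlotCount = kSize / kPointerSize;   // == 1

    int main() {
      // Every try block additionally pushes the context register.
      const int kTryBlockElementCount = 1;
      std::printf("handler slots=%d, try-block elements=%d\n", kSlotCount,
                  kTryBlockElementCount);
    }
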
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index b0bf3b84a9..327230e75d 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -16,7 +16,7 @@
#include "src/prettyprinter.h"
#include "src/scopeinfo.h"
#include "src/scopes.h"
-#include "src/snapshot.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -443,9 +443,8 @@ void FullCodeGenerator::Initialize() {
// calculating PC offsets after generating a debug version of code. Therefore
// we disable the production of debug code in the full compiler if we are
// either generating a snapshot or we booted from a snapshot.
- generate_debug_code_ = FLAG_debug_code &&
- !masm_->serializer_enabled() &&
- !Snapshot::HaveASnapshotToStartFrom();
+ generate_debug_code_ = FLAG_debug_code && !masm_->serializer_enabled() &&
+ !info_->isolate()->snapshot_available();
masm_->set_emit_debug_code(generate_debug_code_);
masm_->set_predictable_code_size(true);
}
@@ -463,6 +462,17 @@ void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
}
+void FullCodeGenerator::CallGlobalLoadIC(Handle<String> name) {
+ if (masm()->serializer_enabled() || FLAG_vector_ics) {
+ // Vector-ICs don't work with LoadGlobalIC.
+ return CallLoadIC(CONTEXTUAL);
+ }
+ Handle<Code> ic = CodeFactory::LoadGlobalIC(
+ isolate(), isolate()->global_object(), name).code();
+ CallIC(ic, TypeFeedbackId::None());
+}
+
+
void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
Handle<Code> ic = CodeFactory::StoreIC(isolate(), language_mode()).code();
CallIC(ic, id);
@@ -860,22 +870,6 @@ void FullCodeGenerator::VisitSuperReference(SuperReference* super) {
}
-bool FullCodeGenerator::ValidateSuperCall(Call* expr) {
- Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
- if (new_target_var == nullptr) {
- // TODO(dslomov): this is not exactly correct, the spec requires us
- // to execute the constructor and only fail when an assigment to 'this'
- // is attempted. Will implement once we have general new.target support,
- // but also filed spec bug 3843 to make it an early error.
- __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
- RecordJSReturnSite(expr);
- context()->Plug(result_register());
- return false;
- }
- return true;
-}
-
-
void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
if (!info_->is_debug()) {
CodeGenerator::RecordPositions(masm_, expr->position());
@@ -909,39 +903,6 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
}
-// Lookup table for code generators for special runtime calls which are
-// generated inline.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &FullCodeGenerator::Emit##Name,
-
-const FullCodeGenerator::InlineFunctionGenerator
- FullCodeGenerator::kInlineFunctionGenerators[] = {
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- };
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
-FullCodeGenerator::InlineFunctionGenerator
- FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
- int lookup_index =
- static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction);
- DCHECK(lookup_index >= 0);
- DCHECK(static_cast<size_t>(lookup_index) <
- arraysize(kInlineFunctionGenerators));
- return kInlineFunctionGenerators[lookup_index];
-}
-
-
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- const Runtime::Function* function = expr->function();
- DCHECK(function != NULL);
- DCHECK(function->intrinsic_type == Runtime::INLINE);
- InlineFunctionGenerator generator =
- FindInlineFunctionGenerator(function->function_id);
- ((*this).*(generator))(expr);
-}
-
-
void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
@@ -1440,7 +1401,6 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
Label try_entry, handler_entry, exit;
__ jmp(&try_entry);
__ bind(&handler_entry);
- handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
// Exception handler code, the exception is in the result register.
// Extend the context before executing the catch block.
{ Comment cmnt(masm_, "[ Extend catch context");
@@ -1466,11 +1426,11 @@ void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
// Try block code. Sets up the exception handler chain.
__ bind(&try_entry);
- __ PushTryHandler(StackHandler::CATCH, stmt->index());
+ EnterTryBlock(stmt->index(), &handler_entry);
{ TryCatch try_body(this);
Visit(stmt->try_block());
}
- __ PopTryHandler();
+ ExitTryBlock(stmt->index());
__ bind(&exit);
}
@@ -1504,7 +1464,6 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Jump to try-handler setup and try-block code.
__ jmp(&try_entry);
__ bind(&handler_entry);
- handler_table()->set(stmt->index(), Smi::FromInt(handler_entry.pos()));
// Exception handler code. This code is only executed when an exception
// is thrown. The exception is in the result register, and must be
// preserved by the finally block. Call the finally block and then
@@ -1523,11 +1482,11 @@ void FullCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// Set up try handler.
__ bind(&try_entry);
- __ PushTryHandler(StackHandler::FINALLY, stmt->index());
+ EnterTryBlock(stmt->index(), &handler_entry);
{ TryFinally try_body(this, &finally_entry);
Visit(stmt->try_block());
}
- __ PopTryHandler();
+ ExitTryBlock(stmt->index());
// Execute the finally block on the way out. Clobber the unpredictable
// value in the result register with one that's safe for GC because the
// finally block will unconditionally preserve the result register on the
@@ -1682,13 +1641,54 @@ void FullCodeGenerator::VisitThrow(Throw* expr) {
}
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryCatch::Exit(
- int* stack_depth,
- int* context_length) {
+void FullCodeGenerator::EnterTryBlock(int index, Label* handler) {
+ handler_table()->SetRangeStart(index, masm()->pc_offset());
+ handler_table()->SetRangeHandler(index, handler->pos());
+
+ // Determine expression stack depth of try statement.
+ int stack_depth = info_->scope()->num_stack_slots(); // Include stack locals.
+ for (NestedStatement* current = nesting_stack_; current != NULL; /*nop*/) {
+ current = current->AccumulateDepth(&stack_depth);
+ }
+ handler_table()->SetRangeDepth(index, stack_depth);
+
+ // Push context onto operand stack.
+ STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
+ __ Push(context_register());
+}
+
+
+void FullCodeGenerator::ExitTryBlock(int index) {
+ handler_table()->SetRangeEnd(index, masm()->pc_offset());
+
+ // Drop context from operand stack.
+ __ Drop(TryBlockConstant::kElementCount);
+}
+
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+ int* stack_depth, int* context_length) {
// The macros used here must preserve the result register.
- __ Drop(*stack_depth);
- __ PopTryHandler();
+
+ // Because the handler block contains the context of the finally
+ // code, we can restore it directly from there for the finally code
+ // rather than iteratively unwinding contexts via their previous
+ // links.
+ if (*context_length > 0) {
+ __ Drop(*stack_depth); // Down to the handler block.
+ // Restore the context to its dedicated register and the stack.
+ STATIC_ASSERT(TryFinally::kElementCount == 1);
+ __ Pop(codegen_->context_register());
+ codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
+ codegen_->context_register());
+ } else {
+ // Down to the handler block and also drop context.
+ __ Drop(*stack_depth + kElementCount);
+ }
+ __ Call(finally_entry_);
+
*stack_depth = 0;
+ *context_length = 0;
return previous_;
}
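
Taken together, EnterTryBlock/ExitTryBlock replace the old five-slot PushTryHandler/PopTryHandler protocol: the handler table records the code range, the handler position, and a statically computed operand-stack depth, while codegen pushes only the context. A sketch of that bookkeeping with stand-in types:

    // try_block_sketch.cc -- the EnterTryBlock/ExitTryBlock protocol with
    // illustrative stand-in types; pc stands in for masm()->pc_offset().
    #include <cstdio>
    #include <vector>

    struct HandlerEntry { int start = 0, end = 0, handler = 0, depth = 0; };

    struct Codegen {
      std::vector<HandlerEntry> table;
      int pc = 0;
      int operand_stack = 0;

      void EnterTryBlock(int index, int handler_pos, int nesting_depth) {
        if (static_cast<int>(table.size()) <= index) table.resize(index + 1);
        table[index].start = pc;
        table[index].handler = handler_pos;
        table[index].depth = nesting_depth;  // AccumulateDepth over nesting
        operand_stack += 1;                  // __ Push(context_register())
      }

      void ExitTryBlock(int index) {
        table[index].end = pc;
        operand_stack -= 1;                  // __ Drop(kElementCount)
      }
    };

    int main() {
      Codegen cg;
      cg.pc = 16;
      cg.EnterTryBlock(0, /*handler_pos=*/64, /*nesting_depth=*/3);
      cg.pc = 48;
      cg.ExitTryBlock(0);
      std::printf("range=[%d,%d) handler=%d depth=%d\n", cg.table[0].start,
                  cg.table[0].end, cg.table[0].handler, cg.table[0].depth);
    }
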
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 6e0ac6253c..19608a4fc9 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -156,6 +156,11 @@ class FullCodeGenerator: public AstVisitor {
return previous_;
}
+ // Like the Exit() method above, but limited to accumulating stack depth.
+ virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ return previous_;
+ }
+
protected:
MacroAssembler* masm() { return codegen_->masm(); }
@@ -225,22 +230,36 @@ class FullCodeGenerator: public AstVisitor {
// The try block of a try/catch statement.
class TryCatch : public NestedStatement {
public:
- explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {
- }
+ static const int kElementCount = TryBlockConstant::kElementCount;
+
+ explicit TryCatch(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
virtual ~TryCatch() {}
- virtual NestedStatement* Exit(int* stack_depth, int* context_length);
+ virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
+ *stack_depth += kElementCount;
+ return previous_;
+ }
+ virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ *stack_depth += kElementCount;
+ return previous_;
+ }
};
// The try block of a try/finally statement.
class TryFinally : public NestedStatement {
public:
+ static const int kElementCount = TryBlockConstant::kElementCount;
+
TryFinally(FullCodeGenerator* codegen, Label* finally_entry)
: NestedStatement(codegen), finally_entry_(finally_entry) {
}
virtual ~TryFinally() {}
virtual NestedStatement* Exit(int* stack_depth, int* context_length);
+ virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ *stack_depth += kElementCount;
+ return previous_;
+ }
private:
Label* finally_entry_;
@@ -249,15 +268,19 @@ class FullCodeGenerator: public AstVisitor {
// The finally block of a try/finally statement.
class Finally : public NestedStatement {
public:
- static const int kElementCount = 5;
+ static const int kElementCount = 3;
- explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) { }
+ explicit Finally(FullCodeGenerator* codegen) : NestedStatement(codegen) {}
virtual ~Finally() {}
virtual NestedStatement* Exit(int* stack_depth, int* context_length) {
*stack_depth += kElementCount;
return previous_;
}
+ virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ *stack_depth += kElementCount;
+ return previous_;
+ }
};
// The body of a for/in loop.
@@ -274,6 +297,10 @@ class FullCodeGenerator: public AstVisitor {
*stack_depth += kElementCount;
return previous_;
}
+ virtual NestedStatement* AccumulateDepth(int* stack_depth) {
+ *stack_depth += kElementCount;
+ return previous_;
+ }
};
@@ -291,11 +318,6 @@ class FullCodeGenerator: public AstVisitor {
}
};
- // Type of a member function that generates inline code for a native function.
- typedef void (FullCodeGenerator::*InlineFunctionGenerator)(CallRuntime* expr);
-
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
// A platform-specific utility to overwrite the accumulator register
// with a GC-safe value.
void ClearAccumulator();
@@ -495,15 +517,52 @@ class FullCodeGenerator: public AstVisitor {
void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
void EmitKeyedSuperCallWithLoadIC(Call* expr);
- // Platform-specific code for inline runtime calls.
- InlineFunctionGenerator FindInlineFunctionGenerator(Runtime::FunctionId id);
-
- void EmitInlineRuntimeCall(CallRuntime* expr);
-
-#define EMIT_INLINE_RUNTIME_CALL(name, x, y) \
- void Emit##name(CallRuntime* expr);
- INLINE_FUNCTION_LIST(EMIT_INLINE_RUNTIME_CALL)
-#undef EMIT_INLINE_RUNTIME_CALL
+#define FOR_EACH_FULL_CODE_INTRINSIC(F) \
+ F(IsSmi) \
+ F(IsNonNegativeSmi) \
+ F(IsArray) \
+ F(IsRegExp) \
+ F(IsJSProxy) \
+ F(IsConstructCall) \
+ F(CallFunction) \
+ F(DefaultConstructorCallSuper) \
+ F(ArgumentsLength) \
+ F(Arguments) \
+ F(ValueOf) \
+ F(SetValueOf) \
+ F(DateField) \
+ F(StringCharFromCode) \
+ F(StringCharAt) \
+ F(OneByteSeqStringSetChar) \
+ F(TwoByteSeqStringSetChar) \
+ F(ObjectEquals) \
+ F(IsObject) \
+ F(IsFunction) \
+ F(IsUndetectableObject) \
+ F(IsSpecObject) \
+ F(IsStringWrapperSafeForDefaultValueOf) \
+ F(MathPow) \
+ F(IsMinusZero) \
+ F(HasCachedArrayIndex) \
+ F(GetCachedArrayIndex) \
+ F(FastOneByteArrayJoin) \
+ F(GeneratorNext) \
+ F(GeneratorThrow) \
+ F(DebugBreakInOptimizedCode) \
+ F(ClassOf) \
+ F(StringCharCodeAt) \
+ F(StringAdd) \
+ F(SubString) \
+ F(StringCompare) \
+ F(RegExpExec) \
+ F(RegExpConstructResult) \
+ F(GetFromCache) \
+ F(NumberToString) \
+ F(DebugIsActive)
+
+#define GENERATOR_DECLARATION(Name) void Emit##Name(CallRuntime* call);
+ FOR_EACH_FULL_CODE_INTRINSIC(GENERATOR_DECLARATION)
+#undef GENERATOR_DECLARATION
// Platform-specific code for resuming generators.
void EmitGeneratorResume(Expression *generator,
@@ -589,19 +648,6 @@ class FullCodeGenerator: public AstVisitor {
// is expected in the accumulator.
void EmitAssignment(Expression* expr);
- // Shall an error be thrown if assignment with 'op' operation is perfomed
- // on this variable in given language mode?
- static bool IsSignallingAssignmentToConst(Variable* var, Token::Value op,
- LanguageMode language_mode) {
- if (var->mode() == CONST) return op != Token::INIT_CONST;
-
- if (var->mode() == CONST_LEGACY) {
- return is_strict(language_mode) && op != Token::INIT_CONST_LEGACY;
- }
-
- return false;
- }
-
// Complete a variable assignment. The right-hand-side value is expected
// in the accumulator.
void EmitVariableAssignment(Variable* var,
@@ -640,13 +686,13 @@ class FullCodeGenerator: public AstVisitor {
void EmitSetHomeObjectIfNeeded(Expression* initializer, int offset);
void EmitLoadSuperConstructor();
- bool ValidateSuperCall(Call* expr);
void CallIC(Handle<Code> code,
TypeFeedbackId id = TypeFeedbackId::None());
void CallLoadIC(ContextualMode mode,
TypeFeedbackId id = TypeFeedbackId::None());
+ void CallGlobalLoadIC(Handle<String> name);
void CallStoreIC(TypeFeedbackId id = TypeFeedbackId::None());
void SetFunctionPosition(FunctionLiteral* fun);
@@ -656,6 +702,8 @@ class FullCodeGenerator: public AstVisitor {
void SetSourcePosition(int pos);
// Non-local control flow support.
+ void EnterTryBlock(int handler_index, Label* handler);
+ void ExitTryBlock(int handler_index);
void EnterFinallyBlock();
void ExitFinallyBlock();
@@ -711,10 +759,7 @@ class FullCodeGenerator: public AstVisitor {
void PopulateDeoptimizationData(Handle<Code> code);
void PopulateTypeFeedbackInfo(Handle<Code> code);
- bool MustCreateObjectLiteralWithRuntime(ObjectLiteral* expr) const;
- bool MustCreateArrayLiteralWithRuntime(ArrayLiteral* expr) const;
-
- Handle<FixedArray> handler_table() { return handler_table_; }
+ Handle<HandlerTable> handler_table() { return handler_table_; }
struct BailoutEntry {
BailoutId id;
@@ -932,7 +977,7 @@ class FullCodeGenerator: public AstVisitor {
ZoneList<BailoutEntry> bailout_entries_;
ZoneList<BackEdgeEntry> back_edges_;
int ic_total_count_;
- Handle<FixedArray> handler_table_;
+ Handle<HandlerTable> handler_table_;
Handle<Cell> profiling_counter_;
bool generate_debug_code_;
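
AccumulateDepth is what makes the statically computed depth in EnterTryBlock possible: unlike Exit(), it totals each nested construct's operand-stack contribution without emitting any pop code. A standalone sketch of the walk (classes are stand-ins; the element counts of 1 and 3 mirror the header above):

    // depth_walk_sketch.cc -- walking a nesting stack with an
    // AccumulateDepth-style virtual; classes are illustrative stand-ins.
    #include <cstdio>

    struct Nested {
      Nested* previous;
      explicit Nested(Nested* prev) : previous(prev) {}
      virtual ~Nested() = default;
      // Default: contributes nothing to the operand stack.
      virtual Nested* AccumulateDepth(int* depth) { return previous; }
    };

    struct TryCatch : Nested {
      using Nested::Nested;
      Nested* AccumulateDepth(int* depth) override {
        *depth += 1;  // the pushed context word (kElementCount)
        return previous;
      }
    };

    struct Finally : Nested {
      using Nested::Nested;
      Nested* AccumulateDepth(int* depth) override {
        *depth += 3;  // Finally::kElementCount after this patch
        return previous;
      }
    };

    int main() {
      TryCatch outer(nullptr);
      Finally fin(&outer);
      int depth = 4;  // stack locals, like scope()->num_stack_slots()
      for (Nested* cur = &fin; cur != nullptr;)
        cur = cur->AccumulateDepth(&depth);
      std::printf("try-statement depth = %d\n", depth);  // 4 + 3 + 1 = 8
    }
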
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index 69b48d6644..044f6fe3f4 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifdef ENABLE_GDB_JIT_INTERFACE
#include "src/v8.h"
#include "src/base/bits.h"
@@ -14,12 +13,15 @@
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/messages.h"
-#include "src/natives.h"
+#include "src/objects.h"
#include "src/ostreams.h"
-#include "src/scopes.h"
+#include "src/snapshot/natives.h"
namespace v8 {
namespace internal {
+namespace GDBJITInterface {
+
+#ifdef ENABLE_GDB_JIT_INTERFACE
#ifdef __APPLE__
#define __MACH_O
@@ -933,15 +935,9 @@ class CodeDescription BASE_EMBEDDED {
};
#endif
- CodeDescription(const char* name, Code* code, Handle<Script> script,
- LineInfo* lineinfo, GDBJITInterface::CodeTag tag,
- CompilationInfo* info)
- : name_(name),
- code_(code),
- script_(script),
- lineinfo_(lineinfo),
- tag_(tag),
- info_(info) {}
+ CodeDescription(const char* name, Code* code, SharedFunctionInfo* shared,
+ LineInfo* lineinfo)
+ : name_(name), code_(code), shared_info_(shared), lineinfo_(lineinfo) {}
const char* name() const {
return name_;
@@ -949,16 +945,16 @@ class CodeDescription BASE_EMBEDDED {
LineInfo* lineinfo() const { return lineinfo_; }
- GDBJITInterface::CodeTag tag() const {
- return tag_;
+ bool is_function() const {
+ Code::Kind kind = code_->kind();
+ return kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION;
}
- CompilationInfo* info() const {
- return info_;
- }
+ bool has_scope_info() const { return shared_info_ != NULL; }
- bool IsInfoAvailable() const {
- return info_ != NULL;
+ ScopeInfo* scope_info() const {
+ DCHECK(has_scope_info());
+ return shared_info_->scope_info();
}
uintptr_t CodeStart() const {
@@ -973,12 +969,16 @@ class CodeDescription BASE_EMBEDDED {
return CodeEnd() - CodeStart();
}
+ bool has_script() {
+ return shared_info_ != NULL && shared_info_->script()->IsScript();
+ }
+
+ Script* script() { return Script::cast(shared_info_->script()); }
+
bool IsLineInfoAvailable() {
- return !script_.is_null() &&
- script_->source()->IsString() &&
- script_->HasValidSource() &&
- script_->name()->IsString() &&
- lineinfo_ != NULL;
+ return has_script() && script()->source()->IsString() &&
+ script()->HasValidSource() && script()->name()->IsString() &&
+ lineinfo_ != NULL;
}
#if V8_TARGET_ARCH_X64
@@ -994,21 +994,17 @@ class CodeDescription BASE_EMBEDDED {
#endif
SmartArrayPointer<char> GetFilename() {
- return String::cast(script_->name())->ToCString();
+ return String::cast(script()->name())->ToCString();
}
- int GetScriptLineNumber(int pos) {
- return script_->GetLineNumber(pos) + 1;
- }
+ int GetScriptLineNumber(int pos) { return script()->GetLineNumber(pos) + 1; }
private:
const char* name_;
Code* code_;
- Handle<Script> script_;
+ SharedFunctionInfo* shared_info_;
LineInfo* lineinfo_;
- GDBJITInterface::CodeTag tag_;
- CompilationInfo* info_;
#if V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
@@ -1095,8 +1091,8 @@ class DebugInfoSection : public DebugSection {
w->Write<uint8_t>(kPointerSize);
w->WriteString("v8value");
- if (desc_->IsInfoAvailable()) {
- Scope* scope = desc_->info()->scope();
+ if (desc_->has_scope_info()) {
+ ScopeInfo* scope = desc_->scope_info();
w->WriteULEB128(2);
w->WriteString(desc_->name());
w->Write<intptr_t>(desc_->CodeStart());
@@ -1118,8 +1114,8 @@ class DebugInfoSection : public DebugSection {
#endif
fb_block_size.set(static_cast<uint32_t>(w->position() - fb_block_start));
- int params = scope->num_parameters();
- int slots = scope->num_stack_slots();
+ int params = scope->ParameterCount();
+ int slots = scope->StackLocalCount();
int context_slots = scope->ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
@@ -1129,7 +1125,7 @@ class DebugInfoSection : public DebugSection {
for (int param = 0; param < params; ++param) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- scope->parameter(param)->name()->ToCString(DISALLOW_NULLS).get());
+ scope->ParameterName(param)->ToCString(DISALLOW_NULLS).get());
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1174,13 +1170,10 @@ class DebugInfoSection : public DebugSection {
w->WriteString(builder.Finalize());
}
- ZoneList<Variable*> stack_locals(locals, scope->zone());
- ZoneList<Variable*> context_locals(context_slots, scope->zone());
- scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
for (int local = 0; local < locals; ++local) {
w->WriteULEB128(current_abbreviation++);
w->WriteString(
- stack_locals[local]->name()->ToCString(DISALLOW_NULLS).get());
+ scope->StackLocalName(local)->ToCString(DISALLOW_NULLS).get());
w->Write<uint32_t>(ty_offset);
Writer::Slot<uint32_t> block_size = w->CreateSlotHere<uint32_t>();
uintptr_t block_start = w->position();
@@ -1302,7 +1295,7 @@ class DebugAbbrevSection : public DebugSection {
bool WriteBodyInternal(Writer* w) {
int current_abbreviation = 1;
- bool extra_info = desc_->IsInfoAvailable();
+ bool extra_info = desc_->has_scope_info();
DCHECK(desc_->IsLineInfoAvailable());
w->WriteULEB128(current_abbreviation++);
w->WriteULEB128(DW_TAG_COMPILE_UNIT);
@@ -1319,9 +1312,9 @@ class DebugAbbrevSection : public DebugSection {
w->WriteULEB128(0);
if (extra_info) {
- Scope* scope = desc_->info()->scope();
- int params = scope->num_parameters();
- int slots = scope->num_stack_slots();
+ ScopeInfo* scope = desc_->scope_info();
+ int params = scope->ParameterCount();
+ int slots = scope->StackLocalCount();
int context_slots = scope->ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
@@ -1868,27 +1861,7 @@ static void DestroyCodeEntry(JITCodeEntry* entry) {
}
-static void RegisterCodeEntry(JITCodeEntry* entry,
- bool dump_if_enabled,
- const char* name_hint) {
-#if defined(DEBUG) && !V8_OS_WIN
- static int file_num = 0;
- if (FLAG_gdbjit_dump && dump_if_enabled) {
- static const int kMaxFileNameSize = 64;
- static const char* kElfFilePrefix = "/tmp/elfdump";
- static const char* kObjFileExt = ".o";
- char file_name[64];
-
- SNPrintF(Vector<char>(file_name, kMaxFileNameSize),
- "%s%s%d%s",
- kElfFilePrefix,
- (name_hint != NULL) ? name_hint : "",
- file_num++,
- kObjFileExt);
- WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
- }
-#endif
-
+static void RegisterCodeEntry(JITCodeEntry* entry) {
entry->next_ = __jit_debug_descriptor.first_entry_;
if (entry->next_ != NULL) entry->next_->prev_ = entry;
__jit_debug_descriptor.first_entry_ =
@@ -1955,69 +1928,65 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
}
-static bool SameCodeObjects(void* key1, void* key2) {
- return key1 == key2;
-}
-
+struct AddressRange {
+ Address start;
+ Address end;
+};
-static HashMap* GetEntries() {
- static HashMap* entries = NULL;
- if (entries == NULL) {
- entries = new HashMap(&SameCodeObjects);
+struct SplayTreeConfig {
+ typedef AddressRange Key;
+ typedef JITCodeEntry* Value;
+ static const AddressRange kNoKey;
+ static Value NoValue() { return NULL; }
+ static int Compare(const AddressRange& a, const AddressRange& b) {
+ // ptrdiff_t probably doesn't fit in an int.
+ if (a.start < b.start) return -1;
+ if (a.start == b.start) return 0;
+ return 1;
}
- return entries;
-}
+};
+const AddressRange SplayTreeConfig::kNoKey = {0, 0};
+typedef SplayTree<SplayTreeConfig> CodeMap;
-static uint32_t HashForCodeObject(Code* code) {
- static const uintptr_t kGoldenRatio = 2654435761u;
- uintptr_t hash = reinterpret_cast<uintptr_t>(code->address());
- return static_cast<uint32_t>((hash >> kCodeAlignmentBits) * kGoldenRatio);
+static CodeMap* GetCodeMap() {
+ static CodeMap* code_map = NULL;
+ if (code_map == NULL) code_map = new CodeMap();
+ return code_map;
}
-static const intptr_t kLineInfoTag = 0x1;
-
-
-static bool IsLineInfoTagged(void* ptr) {
- return 0 != (reinterpret_cast<intptr_t>(ptr) & kLineInfoTag);
+static uint32_t HashCodeAddress(Address addr) {
+ static const uintptr_t kGoldenRatio = 2654435761u;
+ uintptr_t offset = OffsetFrom(addr);
+ return static_cast<uint32_t>((offset >> kCodeAlignmentBits) * kGoldenRatio);
}
-static void* TagLineInfo(LineInfo* ptr) {
- return reinterpret_cast<void*>(
- reinterpret_cast<intptr_t>(ptr) | kLineInfoTag);
+static HashMap* GetLineMap() {
+ static HashMap* line_map = NULL;
+ if (line_map == NULL) line_map = new HashMap(&HashMap::PointersMatch);
+ return line_map;
}
-static LineInfo* UntagLineInfo(void* ptr) {
- return reinterpret_cast<LineInfo*>(reinterpret_cast<intptr_t>(ptr) &
- ~kLineInfoTag);
+static void PutLineInfo(Address addr, LineInfo* info) {
+ HashMap* line_map = GetLineMap();
+ HashMap::Entry* e = line_map->Lookup(addr, HashCodeAddress(addr), true);
+ if (e->value != NULL) delete static_cast<LineInfo*>(e->value);
+ e->value = info;
}
-void GDBJITInterface::AddCode(Handle<Name> name,
- Handle<Script> script,
- Handle<Code> code,
- CompilationInfo* info) {
- if (!FLAG_gdbjit) return;
-
- Script::InitLineEnds(script);
-
- if (!name.is_null() && name->IsString()) {
- SmartArrayPointer<char> name_cstring =
- Handle<String>::cast(name)->ToCString(DISALLOW_NULLS);
- AddCode(name_cstring.get(), *code, GDBJITInterface::FUNCTION, *script,
- info);
- } else {
- AddCode("", *code, GDBJITInterface::FUNCTION, *script, info);
- }
+static LineInfo* GetLineInfo(Address addr) {
+ void* value = GetLineMap()->Remove(addr, HashCodeAddress(addr));
+ return static_cast<LineInfo*>(value);
}
static void AddUnwindInfo(CodeDescription* desc) {
#if V8_TARGET_ARCH_X64
- if (desc->tag() == GDBJITInterface::FUNCTION) {
+ if (desc->is_function()) {
// To avoid propagating unwinding information through
// compilation pipeline we use an approximation.
// For most use cases this should not affect usability.
@@ -2055,39 +2024,83 @@ static void AddUnwindInfo(CodeDescription* desc) {
static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
-void GDBJITInterface::AddCode(const char* name,
- Code* code,
- GDBJITInterface::CodeTag tag,
- Script* script,
- CompilationInfo* info) {
- base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
+// Remove entries from the splay tree that intersect the given address range,
+// and deregister them from GDB.
+static void RemoveJITCodeEntries(CodeMap* map, const AddressRange& range) {
+ DCHECK(range.start < range.end);
+ CodeMap::Locator cur;
+ if (map->FindGreatestLessThan(range, &cur) || map->FindLeast(&cur)) {
+ // Skip entries that are entirely less than the range of interest.
+ while (cur.key().end <= range.start) {
+ // CodeMap::FindLeastGreaterThan succeeds for entries whose key is greater
+ // than _or equal to_ the given key, so we have to advance our key to get
+ // the next one.
+ AddressRange new_key;
+ new_key.start = cur.key().end;
+ new_key.end = 0;
+ if (!map->FindLeastGreaterThan(new_key, &cur)) return;
+ }
+ // Evict intersecting ranges.
+ while (cur.key().start < range.end) {
+ AddressRange old_range = cur.key();
+ JITCodeEntry* old_entry = cur.value();
+
+ UnregisterCodeEntry(old_entry);
+ DestroyCodeEntry(old_entry);
+
+ CHECK(map->Remove(old_range));
+ if (!map->FindLeastGreaterThan(old_range, &cur)) return;
+ }
+ }
+}
+
+
+// Insert the entry into the splay tree and register it with GDB.
+static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
+ JITCodeEntry* entry, bool dump_if_enabled,
+ const char* name_hint) {
+#if defined(DEBUG) && !V8_OS_WIN
+ static int file_num = 0;
+ if (FLAG_gdbjit_dump && dump_if_enabled) {
+ static const int kMaxFileNameSize = 64;
+ char file_name[64];
+
+ SNPrintF(Vector<char>(file_name, kMaxFileNameSize), "/tmp/elfdump%s%d.o",
+ (name_hint != NULL) ? name_hint : "", file_num++);
+ WriteBytes(file_name, entry->symfile_addr_, entry->symfile_size_);
+ }
+#endif
+
+ CodeMap::Locator cur;
+ CHECK(map->Insert(range, &cur));
+ cur.set_value(entry);
+
+ RegisterCodeEntry(entry);
+}
+
+
+static void AddCode(const char* name, Code* code, SharedFunctionInfo* shared,
+ LineInfo* lineinfo) {
DisallowHeapAllocation no_gc;
- HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
- if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
+ CodeMap* code_map = GetCodeMap();
+ AddressRange range;
+ range.start = code->address();
+ range.end = code->address() + code->CodeSize();
+ RemoveJITCodeEntries(code_map, range);
- LineInfo* lineinfo = UntagLineInfo(e->value);
- CodeDescription code_desc(name,
- code,
- script != NULL ? Handle<Script>(script)
- : Handle<Script>(),
- lineinfo,
- tag,
- info);
+ CodeDescription code_desc(name, code, shared, lineinfo);
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
- GetEntries()->Remove(code, HashForCodeObject(code));
return;
}
AddUnwindInfo(&code_desc);
Isolate* isolate = code->GetIsolate();
JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
- DCHECK(!IsLineInfoTagged(entry));
delete lineinfo;
- e->value = entry;
const char* name_hint = NULL;
bool should_dump = false;
@@ -2100,82 +2113,35 @@ void GDBJITInterface::AddCode(const char* name,
should_dump = (name_hint != NULL);
}
}
- RegisterCodeEntry(entry, should_dump, name_hint);
+ AddJITCodeEntry(code_map, range, entry, should_dump, name_hint);
}
-void GDBJITInterface::RemoveCode(Code* code) {
+void EventHandler(const v8::JitCodeEvent* event) {
if (!FLAG_gdbjit) return;
-
- base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
- HashMap::Entry* e = GetEntries()->Lookup(code,
- HashForCodeObject(code),
- false);
- if (e == NULL) return;
-
- if (IsLineInfoTagged(e->value)) {
- delete UntagLineInfo(e->value);
- } else {
- JITCodeEntry* entry = static_cast<JITCodeEntry*>(e->value);
- UnregisterCodeEntry(entry);
- DestroyCodeEntry(entry);
- }
- e->value = NULL;
- GetEntries()->Remove(code, HashForCodeObject(code));
-}
-
-
-void GDBJITInterface::RemoveCodeRange(Address start, Address end) {
- HashMap* entries = GetEntries();
- Zone zone;
- ZoneList<Code*> dead_codes(1, &zone);
-
- for (HashMap::Entry* e = entries->Start(); e != NULL; e = entries->Next(e)) {
- Code* code = reinterpret_cast<Code*>(e->key);
- if (code->address() >= start && code->address() < end) {
- dead_codes.Add(code, &zone);
- }
- }
-
- for (int i = 0; i < dead_codes.length(); i++) {
- RemoveCode(dead_codes.at(i));
- }
-}
-
-
-static void RegisterDetailedLineInfo(Code* code, LineInfo* line_info) {
base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
- DCHECK(!IsLineInfoTagged(line_info));
- HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
- DCHECK(e->value == NULL);
- e->value = TagLineInfo(line_info);
-}
-
-
-void GDBJITInterface::EventHandler(const v8::JitCodeEvent* event) {
- if (!FLAG_gdbjit) return;
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
- Code* code = Code::GetCodeFromTargetAddress(
- reinterpret_cast<Address>(event->code_start));
- if (code->kind() == Code::OPTIMIZED_FUNCTION ||
- code->kind() == Code::FUNCTION) {
- break;
- }
+ Address addr = reinterpret_cast<Address>(event->code_start);
+ Code* code = Code::GetCodeFromTargetAddress(addr);
+ LineInfo* lineinfo = GetLineInfo(addr);
EmbeddedVector<char, 256> buffer;
StringBuilder builder(buffer.start(), buffer.length());
builder.AddSubstring(event->name.str, static_cast<int>(event->name.len));
- AddCode(builder.Finalize(), code, NON_FUNCTION, NULL, NULL);
+ // It's called UnboundScript in the API but it's a SharedFunctionInfo.
+ SharedFunctionInfo* shared =
+ event->script.IsEmpty() ? NULL : *Utils::OpenHandle(*event->script);
+ AddCode(builder.Finalize(), code, shared, lineinfo);
break;
}
case v8::JitCodeEvent::CODE_MOVED:
+ // Enabling the GDB JIT interface should disable code compaction.
+ UNREACHABLE();
break;
- case v8::JitCodeEvent::CODE_REMOVED: {
- Code* code = Code::GetCodeFromTargetAddress(
- reinterpret_cast<Address>(event->code_start));
- RemoveCode(code);
+ case v8::JitCodeEvent::CODE_REMOVED:
+ // Do nothing. Instead, adding code causes eviction of any entry whose
+ // address range intersects the address range of the added code.
break;
- }
case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: {
LineInfo* line_info = reinterpret_cast<LineInfo*>(event->user_data);
line_info->SetPosition(static_cast<intptr_t>(event->line_info.offset),
@@ -2191,14 +2157,12 @@ void GDBJITInterface::EventHandler(const v8::JitCodeEvent* event) {
}
case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
LineInfo* line_info = reinterpret_cast<LineInfo*>(event->user_data);
- Code* code = Code::GetCodeFromTargetAddress(
- reinterpret_cast<Address>(event->code_start));
- RegisterDetailedLineInfo(code, line_info);
+ PutLineInfo(reinterpret_cast<Address>(event->code_start), line_info);
break;
}
}
}
-
-
-} } // namespace v8::internal
#endif
+} // namespace GDBJITInterface
+} // namespace internal
+} // namespace v8
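
The gdb-jit.cc rewrite drops the per-Code-object hash map in favor of a map keyed by address range; AddCode first evicts every registered entry whose range intersects the incoming one, which is exactly why CODE_REMOVED can become a no-op above. A sketch of evict-on-overlap registration using std::map in place of V8's SplayTree:

    // range_map_sketch.cc -- evict-on-overlap registration in the spirit of
    // the gdb-jit.cc rewrite; std::map stands in for V8's SplayTree.
    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>

    using Address = uintptr_t;
    struct Range { Address start, end; };
    struct Entry { std::string name; };

    static bool operator<(const Range& a, const Range& b) {
      return a.start < b.start;
    }

    static void RemoveIntersecting(std::map<Range, Entry>* map,
                                   const Range& r) {
      for (auto it = map->begin(); it != map->end();) {
        bool overlaps = it->first.start < r.end && r.start < it->first.end;
        if (overlaps) {
          it = map->erase(it);  // erase == Unregister + Destroy in the patch
        } else {
          ++it;
        }
      }
    }

    static void AddCode(std::map<Range, Entry>* map, Range r, Entry e) {
      RemoveIntersecting(map, r);  // moved/freed code vanishes implicitly
      (*map)[r] = std::move(e);    // then register the fresh entry
    }

    int main() {
      std::map<Range, Entry> code_map;
      AddCode(&code_map, {0x1000, 0x2000}, {"old stub"});
      AddCode(&code_map, {0x1800, 0x2800}, {"new stub"});  // evicts old stub
      std::printf("entries: %zu\n", code_map.size());      // 1
    }
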
diff --git a/deps/v8/src/gdb-jit.h b/deps/v8/src/gdb-jit.h
index 14536cf0b3..45382702da 100644
--- a/deps/v8/src/gdb-jit.h
+++ b/deps/v8/src/gdb-jit.h
@@ -5,50 +5,35 @@
#ifndef V8_GDB_JIT_H_
#define V8_GDB_JIT_H_
-#include "src/allocation.h"
+#include "src/v8.h"
//
-// Basic implementation of GDB JIT Interface client.
-// GBD JIT Interface is supported in GDB 7.0 and above.
-// Currently on x64 and ia32 architectures and Linux OS are supported.
+// GDB has two ways of interacting with JIT code. With the "JIT compilation
+// interface", V8 can tell GDB when it emits JIT code. Unfortunately to do so,
+// it has to create platform-native object files, possibly with platform-native
+// debugging information. Currently only ELF and Mach-O are supported, which
+// limits this interface to Linux and Mac OS. This JIT compilation interface
+// was introduced in GDB 7.0. V8 support can be enabled with the --gdbjit flag.
+//
+// The other way that GDB can know about V8 code is via the "custom JIT reader"
+// interface, in which a GDB extension parses V8's private data to determine the
+// function, file, and line of a JIT frame, and how to unwind those frames.
+// This interface was introduced in GDB 7.6. This interface still relies on V8
+// to register its code via the JIT compilation interface, but doesn't require
+// that V8 create ELF images. Support will be added for this interface in the
+// future.
//
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
-#include "src/v8.h"
-
-#include "src/factory.h"
namespace v8 {
namespace internal {
-
-class CompilationInfo;
-
-class GDBJITInterface: public AllStatic {
- public:
- enum CodeTag { NON_FUNCTION, FUNCTION };
-
- // Main entry point into GDB JIT realized as a JitCodeEventHandler.
- static void EventHandler(const v8::JitCodeEvent* event);
-
- static void AddCode(Handle<Name> name,
- Handle<Script> script,
- Handle<Code> code,
- CompilationInfo* info);
-
- static void RemoveCodeRange(Address start, Address end);
-
- private:
- static void AddCode(const char* name, Code* code, CodeTag tag, Script* script,
- CompilationInfo* info);
-
- static void RemoveCode(Code* code);
-};
-
-#define GDBJIT(action) GDBJITInterface::action
-
-} } // namespace v8::internal
-#else
-#define GDBJIT(action) ((void) 0)
+namespace GDBJITInterface {
+#ifdef ENABLE_GDB_JIT_INTERFACE
+// JitCodeEventHandler that creates ELF/Mach-O objects and registers them with
+// GDB.
+void EventHandler(const v8::JitCodeEvent* event);
#endif
+} // namespace GDBJITInterface
+} // namespace internal
+} // namespace v8
#endif
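
The interface the new header comment describes is small and publicly documented (GDB manual, "JIT Compilation Interface"): the JIT maintains a doubly linked list of in-memory object files rooted at __jit_debug_descriptor and then calls the deliberately empty __jit_debug_register_code(), on which GDB sets a breakpoint. A sketch of the registration side, matching the documented protocol rather than V8's exact code (the noinline attribute assumes GCC/Clang):

    // gdb_jit_protocol_sketch.cc -- the documented GDB JIT compilation
    // interface; V8's RegisterCodeEntry links entries into the same structure.
    #include <cstdint>

    typedef enum {
      JIT_NOACTION = 0, JIT_REGISTER_FN, JIT_UNREGISTER_FN
    } jit_actions_t;

    struct jit_code_entry {
      jit_code_entry* next_entry;
      jit_code_entry* prev_entry;
      const char* symfile_addr;  // in-memory ELF/Mach-O image
      uint64_t symfile_size;
    };

    struct jit_descriptor {
      uint32_t version;          // must be 1
      uint32_t action_flag;      // a jit_actions_t describing what changed
      jit_code_entry* relevant_entry;
      jit_code_entry* first_entry;
    };

    // GDB puts a breakpoint here; must not be inlined or optimized away.
    extern "C" void __attribute__((noinline)) __jit_debug_register_code() {
      __asm__ __volatile__("");
    }
    extern "C" jit_descriptor __jit_debug_descriptor = {1, 0, nullptr,
                                                        nullptr};

    static void RegisterWithGdb(jit_code_entry* entry) {
      entry->prev_entry = nullptr;
      entry->next_entry = __jit_debug_descriptor.first_entry;
      if (entry->next_entry) entry->next_entry->prev_entry = entry;
      __jit_debug_descriptor.first_entry = entry;
      __jit_debug_descriptor.relevant_entry = entry;
      __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
      __jit_debug_register_code();  // GDB wakes up and reads the descriptor
    }

    int main() {
      static jit_code_entry entry = {nullptr, nullptr, nullptr, 0};
      RegisterWithGdb(&entry);  // a real JIT points symfile_addr at an ELF
    }
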
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 277cad6c3c..25c6b4efda 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -230,20 +230,20 @@ class GlobalHandles::Node {
weak_callback_ = weak_callback;
}
- void MakePhantom(void* parameter, int number_of_internal_fields,
- PhantomCallbackData<void>::Callback phantom_callback) {
- DCHECK(number_of_internal_fields >= 0);
- DCHECK(number_of_internal_fields <= 2);
- DCHECK(phantom_callback != NULL);
+ void MakeWeak(void* parameter,
+ WeakCallbackInfo<void>::Callback phantom_callback,
+ v8::WeakCallbackType type) {
+ DCHECK(phantom_callback != nullptr);
DCHECK(IsInUse());
- CHECK(object_ != NULL);
+ CHECK(object_ != nullptr);
set_state(WEAK);
- if (number_of_internal_fields == 0) {
- set_weakness_type(PHANTOM_WEAK_0_INTERNAL_FIELDS);
- } else if (number_of_internal_fields == 1) {
- set_weakness_type(PHANTOM_WEAK_1_INTERNAL_FIELDS);
- } else {
+ switch (type) {
+ case v8::WeakCallbackType::kParameter:
+ set_weakness_type(PHANTOM_WEAK);
+ break;
+ case v8::WeakCallbackType::kInternalFields:
set_weakness_type(PHANTOM_WEAK_2_INTERNAL_FIELDS);
+ break;
}
set_parameter(parameter);
weak_callback_ = reinterpret_cast<WeakCallback>(phantom_callback);
@@ -264,38 +264,28 @@ class GlobalHandles::Node {
if (weak_callback_ != NULL) {
if (weakness_type() == NORMAL_WEAK) return;
- v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate);
-
- DCHECK(weakness_type() == PHANTOM_WEAK_0_INTERNAL_FIELDS ||
- weakness_type() == PHANTOM_WEAK_1_INTERNAL_FIELDS ||
+ DCHECK(weakness_type() == PHANTOM_WEAK ||
weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
- Object* internal_field0 = nullptr;
- Object* internal_field1 = nullptr;
- if (weakness_type() != PHANTOM_WEAK_0_INTERNAL_FIELDS) {
- JSObject* jsobject = reinterpret_cast<JSObject*>(object());
- DCHECK(jsobject->IsJSObject());
- DCHECK(jsobject->GetInternalFieldCount() >= 1);
- internal_field0 = jsobject->GetInternalField(0);
- if (weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS) {
- DCHECK(jsobject->GetInternalFieldCount() >= 2);
- internal_field1 = jsobject->GetInternalField(1);
+ void* internal_fields[v8::kInternalFieldsInWeakCallback] = {nullptr,
+ nullptr};
+ if (weakness_type() != PHANTOM_WEAK && object()->IsJSObject()) {
+ auto jsobject = JSObject::cast(object());
+ int field_count = jsobject->GetInternalFieldCount();
+ for (int i = 0; i < v8::kInternalFieldsInWeakCallback; ++i) {
+ if (field_count == i) break;
+ auto field = jsobject->GetInternalField(i);
+ if (field->IsSmi()) internal_fields[i] = field;
}
}
- // Zap with harmless value.
- *location() = Smi::FromInt(0);
- typedef PhantomCallbackData<void> Data;
-
- if (!internal_field0->IsSmi()) internal_field0 = nullptr;
- if (!internal_field1->IsSmi()) internal_field1 = nullptr;
-
- Data data(api_isolate, parameter(), internal_field0, internal_field1);
- Data::Callback callback =
- reinterpret_cast<Data::Callback>(weak_callback_);
+ // Zap with something dangerous.
+ *location() = reinterpret_cast<Object*>(0x6057ca11);
+ typedef v8::WeakCallbackInfo<void> Data;
+ auto callback = reinterpret_cast<Data::Callback>(weak_callback_);
pending_phantom_callbacks->Add(
- PendingPhantomCallback(this, data, callback));
+ PendingPhantomCallback(this, callback, parameter(), internal_fields));
DCHECK(IsInUse());
set_state(NEAR_DEATH);
}
@@ -562,14 +552,13 @@ void GlobalHandles::MakeWeak(Object** location, void* parameter,
}
-typedef PhantomCallbackData<void>::Callback GenericCallback;
+typedef v8::WeakCallbackInfo<void>::Callback GenericCallback;
-void GlobalHandles::MakePhantom(Object** location, void* parameter,
- int number_of_internal_fields,
- GenericCallback phantom_callback) {
- Node::FromLocation(location)
- ->MakePhantom(parameter, number_of_internal_fields, phantom_callback);
+void GlobalHandles::MakeWeak(Object** location, void* parameter,
+ GenericCallback phantom_callback,
+ v8::WeakCallbackType type) {
+ Node::FromLocation(location)->MakeWeak(parameter, phantom_callback, type);
}
@@ -633,13 +622,12 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
// In the internal fields case we will need the internal
// fields, so we can't zap the handle.
if (node->state() == Node::PENDING) {
- if (node->weakness_type() == PHANTOM_WEAK_0_INTERNAL_FIELDS) {
+ if (node->weakness_type() == PHANTOM_WEAK) {
*(node->location()) = Smi::FromInt(0);
} else if (node->weakness_type() == NORMAL_WEAK) {
v->VisitPointer(node->location());
} else {
- DCHECK(node->weakness_type() == PHANTOM_WEAK_1_INTERNAL_FIELDS ||
- node->weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
+ DCHECK(node->weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
}
} else {
// Node is not pending, so that means the object survived. We still
@@ -692,13 +680,12 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
DCHECK(node->is_in_new_space_list());
if ((node->is_independent() || node->is_partially_dependent()) &&
node->IsWeakRetainer()) {
- if (node->weakness_type() == PHANTOM_WEAK_0_INTERNAL_FIELDS) {
+ if (node->weakness_type() == PHANTOM_WEAK) {
*(node->location()) = Smi::FromInt(0);
} else if (node->weakness_type() == NORMAL_WEAK) {
v->VisitPointer(node->location());
} else {
- DCHECK(node->weakness_type() == PHANTOM_WEAK_1_INTERNAL_FIELDS ||
- node->weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
+ DCHECK(node->weakness_type() == PHANTOM_WEAK_2_INTERNAL_FIELDS);
// For this case we only need to trace if it's alive: The tracing of
// something that is already alive is just to get the pointer updated
// to the new location of the object).
@@ -841,17 +828,50 @@ void GlobalHandles::UpdateListOfNewSpaceNodes() {
int GlobalHandles::DispatchPendingPhantomCallbacks() {
int freed_nodes = 0;
+ {
+ // The initial pass callbacks must simply clear the nodes.
+ for (auto i = pending_phantom_callbacks_.begin();
+ i != pending_phantom_callbacks_.end(); ++i) {
+ auto callback = i;
+ // Skip callbacks that have already been processed once.
+ if (callback->node() == nullptr) continue;
+ callback->Invoke(isolate());
+ freed_nodes++;
+ }
+ }
+ // The second pass empties the list.
while (pending_phantom_callbacks_.length() != 0) {
- PendingPhantomCallback callback = pending_phantom_callbacks_.RemoveLast();
- DCHECK(callback.node()->IsInUse());
- callback.invoke();
- DCHECK(!callback.node()->IsInUse());
- freed_nodes++;
+ auto callback = pending_phantom_callbacks_.RemoveLast();
+ DCHECK(callback.node() == nullptr);
+ // No second pass callback required.
+ if (callback.callback() == nullptr) continue;
+ // Fire second pass callback.
+ callback.Invoke(isolate());
}
return freed_nodes;
}
+void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate) {
+ Data::Callback* callback_addr = nullptr;
+ if (node_ != nullptr) {
+ // Initialize for first pass callback.
+ DCHECK(node_->state() == Node::NEAR_DEATH);
+ callback_addr = &callback_;
+ }
+ Data data(reinterpret_cast<v8::Isolate*>(isolate), parameter_,
+ internal_fields_, callback_addr);
+ Data::Callback callback = callback_;
+ callback_ = nullptr;
+ callback(data);
+ if (node_ != nullptr) {
+ // Transition to second pass state.
+ DCHECK(node_->state() == Node::FREE);
+ node_ = nullptr;
+ }
+}
+
+
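For orientation, here is a minimal embedder-side sketch of the two-pass contract that Invoke() implements above: the first-pass callback may only clear the dying handle, and any heavy teardown is scheduled for a second pass. It assumes the v8::WeakCallbackInfo/SetWeak API that accompanies this patch; Wrapper, FirstPass and SecondPass are illustrative names, not part of the change.

#include <v8.h>

struct Wrapper {
  v8::Persistent<v8::Object> handle;
  char* payload;
};

static void SecondPass(const v8::WeakCallbackInfo<Wrapper>& info) {
  Wrapper* w = info.GetParameter();
  delete[] w->payload;  // Heavy cleanup is deferred to the second pass.
  delete w;
}

static void FirstPass(const v8::WeakCallbackInfo<Wrapper>& info) {
  Wrapper* w = info.GetParameter();
  w->handle.Reset();  // The first pass may only clear the dying handle.
  info.SetSecondPassCallback(SecondPass);
}

// Registration, e.g. right after wrapping a native object:
//   wrapper->handle.SetWeak(wrapper, FirstPass,
//                           v8::WeakCallbackType::kParameter);
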
int GlobalHandles::PostGarbageCollectionProcessing(GarbageCollector collector) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
@@ -859,6 +879,12 @@ int GlobalHandles::PostGarbageCollectionProcessing(GarbageCollector collector) {
DCHECK(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
const int initial_post_gc_processing_count = ++post_gc_processing_count_;
int freed_nodes = 0;
+ freed_nodes += DispatchPendingPhantomCallbacks();
+ if (initial_post_gc_processing_count != post_gc_processing_count_) {
+ // If the callbacks caused a nested GC, then return. See comment in
+ // PostScavengeProcessing.
+ return freed_nodes;
+ }
if (collector == SCAVENGER) {
freed_nodes = PostScavengeProcessing(initial_post_gc_processing_count);
} else {
@@ -869,7 +895,6 @@ int GlobalHandles::PostGarbageCollectionProcessing(GarbageCollector collector) {
// PostScavengeProcessing.
return freed_nodes;
}
- freed_nodes += DispatchPendingPhantomCallbacks();
if (initial_post_gc_processing_count == post_gc_processing_count_) {
UpdateListOfNewSpaceNodes();
}
@@ -877,14 +902,6 @@ int GlobalHandles::PostGarbageCollectionProcessing(GarbageCollector collector) {
}
-void GlobalHandles::PendingPhantomCallback::invoke() {
- if (node_->state() == Node::FREE) return;
- DCHECK(node_->state() == Node::NEAR_DEATH);
- callback_(data_);
- if (node_->state() != Node::FREE) node_->Release();
-}
-
-
void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsStrongRetainer()) {
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 767989c77e..cb5619ffbe 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -100,12 +100,11 @@ struct ObjectGroupRetainerInfo {
enum WeaknessType {
NORMAL_WEAK, // Embedder gets a handle to the dying object.
// In the following cases, the embedder gets the parameter they passed in
- // earlier, and the 0, 1 or 2 first internal fields. Note that the internal
+  // earlier, and the first 0 or 2 internal fields. Note that the internal
// fields must contain aligned non-V8 pointers. Getting pointers to V8
// objects through this interface would be GC unsafe so in that case the
// embedder gets a null pointer instead.
- PHANTOM_WEAK_0_INTERNAL_FIELDS,
- PHANTOM_WEAK_1_INTERNAL_FIELDS,
+ PHANTOM_WEAK,
PHANTOM_WEAK_2_INTERNAL_FIELDS
};
@@ -145,9 +144,9 @@ class GlobalHandles {
// It would be nice to template this one, but it's really hard to get
// the template instantiator to work right if you do.
- static void MakePhantom(Object** location, void* parameter,
- int number_of_internal_fields,
- PhantomCallbackData<void>::Callback weak_callback);
+ static void MakeWeak(Object** location, void* parameter,
+ WeakCallbackInfo<void>::Callback weak_callback,
+ v8::WeakCallbackType type);
void RecordStats(HeapStats* stats);
@@ -349,18 +348,26 @@ class GlobalHandles {
class GlobalHandles::PendingPhantomCallback {
public:
- typedef PhantomCallbackData<void> Data;
- PendingPhantomCallback(Node* node, Data data, Data::Callback callback)
- : node_(node), data_(data), callback_(callback) {}
+ typedef v8::WeakCallbackInfo<void> Data;
+ PendingPhantomCallback(
+ Node* node, Data::Callback callback, void* parameter,
+ void* internal_fields[v8::kInternalFieldsInWeakCallback])
+ : node_(node), callback_(callback), parameter_(parameter) {
+ for (int i = 0; i < v8::kInternalFieldsInWeakCallback; ++i) {
+ internal_fields_[i] = internal_fields[i];
+ }
+ }
- void invoke();
+ void Invoke(Isolate* isolate);
Node* node() { return node_; }
+ Data::Callback callback() { return callback_; }
private:
Node* node_;
- Data data_;
Data::Callback callback_;
+ void* parameter_;
+ void* internal_fields_[v8::kInternalFieldsInWeakCallback];
};
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 32396d89ea..e93fa3b07e 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -89,7 +89,7 @@ namespace internal {
// Determine whether double field unboxing feature is enabled.
#if V8_TARGET_ARCH_64_BIT
-#define V8_DOUBLE_FIELDS_UNBOXING 0
+#define V8_DOUBLE_FIELDS_UNBOXING 1
#else
#define V8_DOUBLE_FIELDS_UNBOXING 0
#endif
@@ -408,19 +408,18 @@ typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
// consecutive.
// Keep this enum in sync with the ObjectSpace enum in v8.h
enum AllocationSpace {
- NEW_SPACE, // Semispaces collected with copying collector.
- OLD_POINTER_SPACE, // May contain pointers to new space.
- OLD_DATA_SPACE, // Must not have pointers to new space.
- CODE_SPACE, // No pointers to new space, marked executable.
- MAP_SPACE, // Only and all map objects.
- CELL_SPACE, // Only and all cell objects.
- PROPERTY_CELL_SPACE, // Only and all global property cell objects.
- LO_SPACE, // Promoted large objects.
+ NEW_SPACE, // Semispaces collected with copying collector.
+ OLD_POINTER_SPACE, // May contain pointers to new space.
+ OLD_DATA_SPACE, // Must not have pointers to new space.
+ CODE_SPACE, // No pointers to new space, marked executable.
+ MAP_SPACE, // Only and all map objects.
+ CELL_SPACE, // Only and all cell objects.
+ LO_SPACE, // Promoted large objects.
FIRST_SPACE = NEW_SPACE,
LAST_SPACE = LO_SPACE,
FIRST_PAGED_SPACE = OLD_POINTER_SPACE,
- LAST_PAGED_SPACE = PROPERTY_CELL_SPACE
+ LAST_PAGED_SPACE = CELL_SPACE
};
const int kSpaceTagSize = 3;
const int kSpaceTagMask = (1 << kSpaceTagSize) - 1;
@@ -452,6 +451,13 @@ enum VisitMode {
enum NativesFlag { NOT_NATIVES_CODE, NATIVES_CODE };
+// ParseRestriction is used to restrict the set of valid statements in a
+// unit of compilation. Restriction violations cause a syntax error.
+enum ParseRestriction {
+ NO_PARSE_RESTRICTION, // All expressions are allowed.
+ ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression.
+};
+
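A minimal sketch of the contract the enum above describes; the helper name is hypothetical and not part of this patch.

// Under NO_PARSE_RESTRICTION every program parses normally; under
// ONLY_SINGLE_FUNCTION_LITERAL the compilation unit must be exactly one
// function literal, e.g. "(function (a, b) { return a + b; })".
bool SatisfiesRestriction(ParseRestriction r, bool is_single_function_literal) {
  return r == NO_PARSE_RESTRICTION || is_single_function_literal;
}
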
// A CodeDesc describes a buffer holding instructions and relocation
// information. The instructions start at the beginning of the buffer
// and grow forward, the relocation information starts at the end of
@@ -696,9 +702,15 @@ enum ScopeType {
ARROW_SCOPE // The top-level scope for an arrow function literal.
};
-
+// The mips architecture prior to revision 5 has inverted encoding for sNaN.
+#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6)) || \
+ (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6))
+const uint32_t kHoleNanUpper32 = 0xFFFF7FFF;
+const uint32_t kHoleNanLower32 = 0xFFFF7FFF;
+#else
const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;
const uint32_t kHoleNanLower32 = 0xFFF7FFFF;
+#endif
const uint64_t kHoleNanInt64 =
(static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;
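A quick worked check of the composition above; purely illustrative, using the constants from the non-MIPS branch.

static_assert((static_cast<uint64_t>(0xFFF7FFFF) << 32 | 0xFFF7FFFF) ==
                  0xFFF7FFFFFFF7FFFFull,
              "hole NaN composition");
// The pre-r6 MIPS branch instead yields 0xFFFF7FFFFFFF7FFF.
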
@@ -711,10 +723,12 @@ enum VariableMode {
CONST_LEGACY, // declared via legacy 'const' declarations
- LET, // declared via 'let' declarations
+ LET, // declared via 'let' declarations (first lexical)
CONST, // declared via 'const' declarations
+ IMPORT, // declared via 'import' declarations (last lexical)
+
// Variables introduced by the compiler:
INTERNAL, // like VAR, but not user-visible (may or may not
// be in a context)
@@ -742,17 +756,17 @@ inline bool IsDynamicVariableMode(VariableMode mode) {
inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= CONST;
+ return mode >= VAR && mode <= IMPORT;
}
inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode == LET || mode == CONST;
+ return mode >= LET && mode <= IMPORT;
}
inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || mode == CONST_LEGACY;
+ return mode == CONST || mode == CONST_LEGACY || mode == IMPORT;
}
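The range checks above lean on enumerator ordering; a sketch of the invariant, mirroring the comments in this hunk (illustrative, not part of the patch):

static_assert(LET < CONST && CONST < IMPORT,
              "lexical modes stay contiguous, LET first and IMPORT last");
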
@@ -796,6 +810,10 @@ enum InitializationFlag {
enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };
+// Serialized in PreparseData, so numeric values should not be changed.
+enum ParseErrorType { kSyntaxError = 0, kReferenceError = 1 };
+
+
enum ClearExceptionFlag {
KEEP_EXCEPTION,
CLEAR_EXCEPTION
@@ -821,8 +839,13 @@ enum FunctionKind {
kDefaultConstructor = 1 << 4,
kSubclassConstructor = 1 << 5,
kBaseConstructor = 1 << 6,
+ kInObjectLiteral = 1 << 7,
kDefaultBaseConstructor = kDefaultConstructor | kBaseConstructor,
- kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor
+ kDefaultSubclassConstructor = kDefaultConstructor | kSubclassConstructor,
+ kConciseMethodInObjectLiteral = kConciseMethod | kInObjectLiteral,
+ kConciseGeneratorMethodInObjectLiteral =
+ kConciseGeneratorMethod | kInObjectLiteral,
+ kAccessorFunctionInObjectLiteral = kAccessorFunction | kInObjectLiteral,
};
@@ -836,7 +859,10 @@ inline bool IsValidFunctionKind(FunctionKind kind) {
kind == FunctionKind::kDefaultBaseConstructor ||
kind == FunctionKind::kDefaultSubclassConstructor ||
kind == FunctionKind::kBaseConstructor ||
- kind == FunctionKind::kSubclassConstructor;
+ kind == FunctionKind::kSubclassConstructor ||
+ kind == FunctionKind::kConciseMethodInObjectLiteral ||
+ kind == FunctionKind::kConciseGeneratorMethodInObjectLiteral ||
+ kind == FunctionKind::kAccessorFunctionInObjectLiteral;
}
@@ -888,6 +914,19 @@ inline bool IsConstructor(FunctionKind kind) {
(FunctionKind::kBaseConstructor | FunctionKind::kSubclassConstructor |
FunctionKind::kDefaultConstructor);
}
+
+
+inline bool IsInObjectLiteral(FunctionKind kind) {
+ DCHECK(IsValidFunctionKind(kind));
+ return kind & FunctionKind::kInObjectLiteral;
+}
+
+
+inline FunctionKind WithObjectLiteralBit(FunctionKind kind) {
+ kind = static_cast<FunctionKind>(kind | FunctionKind::kInObjectLiteral);
+ DCHECK(IsValidFunctionKind(kind));
+ return kind;
+}
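A hypothetical use of the helpers above, tagging a concise method that the parser encountered inside an object literal:

FunctionKind kind = WithObjectLiteralBit(FunctionKind::kConciseMethod);
DCHECK(IsInObjectLiteral(kind));  // kind == kConciseMethodInObjectLiteral
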
} } // namespace v8::internal
namespace i = v8::internal;
diff --git a/deps/v8/src/harmony-array.js b/deps/v8/src/harmony-array.js
index 72625a57f5..5152a557dd 100644
--- a/deps/v8/src/harmony-array.js
+++ b/deps/v8/src/harmony-array.js
@@ -211,7 +211,7 @@ function ArrayOf() {
function HarmonyArrayExtendSymbolPrototype() {
%CheckIsBootstrapping();
- InstallConstants($Symbol, $Array(
+ InstallConstants(global.Symbol, $Array(
// TODO(dslomov, caitp): Move to symbol.js when shipping
"isConcatSpreadable", symbolIsConcatSpreadable
));
diff --git a/deps/v8/src/harmony-reflect.js b/deps/v8/src/harmony-reflect.js
new file mode 100644
index 0000000000..f900d70f29
--- /dev/null
+++ b/deps/v8/src/harmony-reflect.js
@@ -0,0 +1,18 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+var $Reflect = global.Reflect;
+
+function SetUpReflect() {
+ %CheckIsBootstrapping();
+
+ InstallFunctions($Reflect, DONT_ENUM, $Array(
+ "apply", ReflectApply,
+ "construct", ReflectConstruct
+ ));
+}
+
+SetUpReflect();
diff --git a/deps/v8/src/harmony-string.js b/deps/v8/src/harmony-string.js
deleted file mode 100644
index 6bbe139e87..0000000000
--- a/deps/v8/src/harmony-string.js
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-"use strict";
-
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// var $String = global.String;
-// var $Array = global.Array;
-
-// -------------------------------------------------------------------
-
-// ES6 draft 01-20-14, section 21.1.3.13
-function StringRepeat(count) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
-
- var s = TO_STRING_INLINE(this);
- var n = ToInteger(count);
- // The maximum string length is stored in a smi, so a longer repeat
- // must result in a range error.
- if (n < 0 || n > %_MaxSmi()) {
- throw MakeRangeError("invalid_count_value", []);
- }
-
- var r = "";
- while (true) {
- if (n & 1) r += s;
- n >>= 1;
- if (n === 0) return r;
- s += s;
- }
-}
-
-
-// ES6 draft 04-05-14, section 21.1.3.18
-function StringStartsWith(searchString /* position */) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
-
- var s = TO_STRING_INLINE(this);
-
- if (IS_REGEXP(searchString)) {
- throw MakeTypeError("first_argument_not_regexp",
- ["String.prototype.startsWith"]);
- }
-
- var ss = TO_STRING_INLINE(searchString);
- var pos = 0;
- if (%_ArgumentsLength() > 1) {
- pos = %_Arguments(1); // position
- pos = ToInteger(pos);
- }
-
- var s_len = s.length;
- var start = MathMin(MathMax(pos, 0), s_len);
- var ss_len = ss.length;
- if (ss_len + start > s_len) {
- return false;
- }
-
- return %StringIndexOf(s, ss, start) === start;
-}
-
-
-// ES6 draft 04-05-14, section 21.1.3.7
-function StringEndsWith(searchString /* position */) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
-
- var s = TO_STRING_INLINE(this);
-
- if (IS_REGEXP(searchString)) {
- throw MakeTypeError("first_argument_not_regexp",
- ["String.prototype.endsWith"]);
- }
-
- var ss = TO_STRING_INLINE(searchString);
- var s_len = s.length;
- var pos = s_len;
- if (%_ArgumentsLength() > 1) {
- var arg = %_Arguments(1); // position
- if (!IS_UNDEFINED(arg)) {
- pos = ToInteger(arg);
- }
- }
-
- var end = MathMin(MathMax(pos, 0), s_len);
- var ss_len = ss.length;
- var start = end - ss_len;
- if (start < 0) {
- return false;
- }
-
- return %StringLastIndexOf(s, ss, start) === start;
-}
-
-
-// ES6 draft 04-05-14, section 21.1.3.6
-function StringIncludes(searchString /* position */) { // length == 1
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
-
- var s = TO_STRING_INLINE(this);
-
- if (IS_REGEXP(searchString)) {
- throw MakeTypeError("first_argument_not_regexp",
- ["String.prototype.includes"]);
- }
-
- var ss = TO_STRING_INLINE(searchString);
- var pos = 0;
- if (%_ArgumentsLength() > 1) {
- pos = %_Arguments(1); // position
- pos = ToInteger(pos);
- }
-
- var s_len = s.length;
- var start = MathMin(MathMax(pos, 0), s_len);
- var ss_len = ss.length;
- if (ss_len + start > s_len) {
- return false;
- }
-
- return %StringIndexOf(s, ss, start) !== -1;
-}
-
-
-// ES6 Draft 05-22-2014, section 21.1.3.3
-function StringCodePointAt(pos) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt");
-
- var string = TO_STRING_INLINE(this);
- var size = string.length;
- pos = TO_INTEGER(pos);
- if (pos < 0 || pos >= size) {
- return UNDEFINED;
- }
- var first = %_StringCharCodeAt(string, pos);
- if (first < 0xD800 || first > 0xDBFF || pos + 1 == size) {
- return first;
- }
- var second = %_StringCharCodeAt(string, pos + 1);
- if (second < 0xDC00 || second > 0xDFFF) {
- return first;
- }
- return (first - 0xD800) * 0x400 + second + 0x2400;
-}
-
-
-// ES6 Draft 05-22-2014, section 21.1.2.2
-function StringFromCodePoint(_) { // length = 1
- var code;
- var length = %_ArgumentsLength();
- var index;
- var result = "";
- for (index = 0; index < length; index++) {
- code = %_Arguments(index);
- if (!%_IsSmi(code)) {
- code = ToNumber(code);
- }
- if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) {
- throw MakeRangeError("invalid_code_point", [code]);
- }
- if (code <= 0xFFFF) {
- result += %_StringCharFromCode(code);
- } else {
- code -= 0x10000;
- result += %_StringCharFromCode((code >>> 10) & 0x3FF | 0xD800);
- result += %_StringCharFromCode(code & 0x3FF | 0xDC00);
- }
- }
- return result;
-}
-
-
-// -------------------------------------------------------------------
-
-function ExtendStringPrototype() {
- %CheckIsBootstrapping();
-
- // Set up the non-enumerable functions on the String object.
- InstallFunctions($String, DONT_ENUM, $Array(
- "fromCodePoint", StringFromCodePoint
- ));
-
- // Set up the non-enumerable functions on the String prototype object.
- InstallFunctions($String.prototype, DONT_ENUM, $Array(
- "codePointAt", StringCodePointAt,
- "includes", StringIncludes,
- "endsWith", StringEndsWith,
- "repeat", StringRepeat,
- "startsWith", StringStartsWith
- ));
-}
-
-ExtendStringPrototype();
diff --git a/deps/v8/src/harmony-tostring.js b/deps/v8/src/harmony-tostring.js
index aed8ca0399..4f4f986fd2 100644
--- a/deps/v8/src/harmony-tostring.js
+++ b/deps/v8/src/harmony-tostring.js
@@ -7,7 +7,6 @@
// This file relies on the fact that the following declaration has been made
// in runtime.js and symbol.js:
// var $Object = global.Object;
-// var $Symbol = global.Symbol;
DefaultObjectToString = ObjectToStringHarmony;
// ES6 draft 08-24-14, section 19.1.3.6
@@ -26,7 +25,7 @@ function ObjectToStringHarmony() {
function HarmonyToStringExtendSymbolPrototype() {
%CheckIsBootstrapping();
- InstallConstants($Symbol, $Array(
+ InstallConstants(global.Symbol, $Array(
// TODO(dslomov, caitp): Move to symbol.js when shipping
"toStringTag", symbolToStringTag
));
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index d86ce5ec32..0fe50340e7 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -15,7 +15,6 @@ namespace internal {
HeapProfiler::HeapProfiler(Heap* heap)
: ids_(new HeapObjectsMap(heap)),
names_(new StringsStorage(heap)),
- next_snapshot_uid_(1),
is_tracking_object_moves_(false) {
}
@@ -63,10 +62,9 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
HeapSnapshot* HeapProfiler::TakeSnapshot(
- const char* name,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver) {
- HeapSnapshot* result = new HeapSnapshot(this, name, next_snapshot_uid_++);
+ HeapSnapshot* result = new HeapSnapshot(this);
{
HeapSnapshotGenerator generator(result, control, resolver, heap());
if (!generator.GenerateSnapshot()) {
@@ -82,14 +80,6 @@ HeapSnapshot* HeapProfiler::TakeSnapshot(
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(
- String* name,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
- return TakeSnapshot(names_->GetName(name), control, resolver);
-}
-
-
void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
ids_->UpdateHeapObjectsMap();
is_tracking_object_moves_ = true;
@@ -101,8 +91,9 @@ void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
}
-SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream) {
- return ids_->PushHeapObjectsStats(stream);
+SnapshotObjectId HeapProfiler::PushHeapObjectsStats(OutputStream* stream,
+ int64_t* timestamp_us) {
+ return ids_->PushHeapObjectsStats(stream, timestamp_us);
}
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index 4197d4d54c..68e13656c6 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -22,11 +22,6 @@ class HeapProfiler {
size_t GetMemorySizeUsedByProfiler();
HeapSnapshot* TakeSnapshot(
- const char* name,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
- HeapSnapshot* TakeSnapshot(
- String* name,
v8::ActivityControl* control,
v8::HeapProfiler::ObjectNameResolver* resolver);
@@ -38,7 +33,8 @@ class HeapProfiler {
HeapObjectsMap* heap_object_map() const { return ids_.get(); }
StringsStorage* names() const { return names_.get(); }
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream,
+ int64_t* timestamp_us);
int GetSnapshotsCount();
HeapSnapshot* GetSnapshot(int index);
SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
@@ -73,7 +69,6 @@ class HeapProfiler {
SmartPointer<HeapObjectsMap> ids_;
List<HeapSnapshot*> snapshots_;
SmartPointer<StringsStorage> names_;
- unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
SmartPointer<AllocationTracker> allocation_tracker_;
bool is_tracking_object_moves_;
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index 8e185184ce..b8f9ab3d38 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -178,12 +178,8 @@ template <> struct SnapshotSizeConstants<8> {
} // namespace
-HeapSnapshot::HeapSnapshot(HeapProfiler* profiler,
- const char* title,
- unsigned uid)
+HeapSnapshot::HeapSnapshot(HeapProfiler* profiler)
: profiler_(profiler),
- title_(title),
- uid_(uid),
root_index_(HeapEntry::kNoEntry),
gc_roots_index_(HeapEntry::kNoEntry),
max_snapshot_js_object_id_(0) {
@@ -615,7 +611,8 @@ int HeapObjectsMap::FindUntrackedObjects() {
}
-SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
+SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream,
+ int64_t* timestamp_us) {
UpdateHeapObjectsMap();
time_intervals_.Add(TimeInterval(next_id_));
int prefered_chunk_size = stream->GetChunkSize();
@@ -657,6 +654,10 @@ SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
if (result == OutputStream::kAbort) return last_assigned_id();
}
stream->EndOfStream();
+ if (timestamp_us) {
+ *timestamp_us = (time_intervals_.last().timestamp -
+ time_intervals_[0].timestamp).InMicroseconds();
+ }
return last_assigned_id();
}
@@ -1286,39 +1287,31 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
- if (map->HasTransitionArray()) {
- TransitionArray* transitions = map->transitions();
+ Object* raw_transitions = map->raw_transitions();
+ if (TransitionArray::IsFullTransitionArray(raw_transitions)) {
+ TransitionArray* transitions = TransitionArray::cast(raw_transitions);
int transitions_entry = GetEntry(transitions)->index();
- Object* back_pointer = transitions->back_pointer_storage();
- TagObject(back_pointer, "(back pointer)");
- SetInternalReference(transitions, transitions_entry,
- "back_pointer", back_pointer);
if (FLAG_collect_maps && map->CanTransition()) {
- if (!transitions->IsSimpleTransition()) {
- if (transitions->HasPrototypeTransitions()) {
- FixedArray* prototype_transitions =
- transitions->GetPrototypeTransitions();
- MarkAsWeakContainer(prototype_transitions);
- TagObject(prototype_transitions, "(prototype transitions");
- SetInternalReference(transitions, transitions_entry,
- "prototype_transitions", prototype_transitions);
- }
- // TODO(alph): transitions keys are strong links.
- MarkAsWeakContainer(transitions);
+ if (transitions->HasPrototypeTransitions()) {
+ FixedArray* prototype_transitions =
+ transitions->GetPrototypeTransitions();
+ MarkAsWeakContainer(prototype_transitions);
+        TagObject(prototype_transitions, "(prototype transitions)");
+ SetInternalReference(transitions, transitions_entry,
+ "prototype_transitions", prototype_transitions);
}
+ // TODO(alph): transitions keys are strong links.
+ MarkAsWeakContainer(transitions);
}
TagObject(transitions, "(transition array)");
- SetInternalReference(map, entry,
- "transitions", transitions,
- Map::kTransitionsOrBackPointerOffset);
- } else {
- Object* back_pointer = map->GetBackPointer();
- TagObject(back_pointer, "(back pointer)");
- SetInternalReference(map, entry,
- "back_pointer", back_pointer,
- Map::kTransitionsOrBackPointerOffset);
+ SetInternalReference(map, entry, "transitions", transitions,
+ Map::kTransitionsOffset);
+ } else if (TransitionArray::IsSimpleTransition(raw_transitions)) {
+ TagObject(raw_transitions, "(transition)");
+ SetInternalReference(map, entry, "transition", raw_transitions,
+ Map::kTransitionsOffset);
}
DescriptorArray* descriptors = map->instance_descriptors();
TagObject(descriptors, "(map descriptors)");
@@ -1332,9 +1325,15 @@ void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) {
Map::kCodeCacheOffset);
SetInternalReference(map, entry,
"prototype", map->prototype(), Map::kPrototypeOffset);
- SetInternalReference(map, entry,
- "constructor", map->constructor(),
- Map::kConstructorOffset);
+ Object* constructor_or_backpointer = map->constructor_or_backpointer();
+ if (constructor_or_backpointer->IsMap()) {
+ TagObject(constructor_or_backpointer, "(back pointer)");
+ SetInternalReference(map, entry, "back_pointer", constructor_or_backpointer,
+ Map::kConstructorOrBackPointerOffset);
+ } else {
+ SetInternalReference(map, entry, "constructor", constructor_or_backpointer,
+ Map::kConstructorOrBackPointerOffset);
+ }
TagObject(map->dependent_code(), "(dependent code)");
MarkAsWeakContainer(map->dependent_code());
SetInternalReference(map, entry,
@@ -1518,9 +1517,8 @@ void V8HeapExplorer::ExtractCellReferences(int entry, Cell* cell) {
void V8HeapExplorer::ExtractPropertyCellReferences(int entry,
PropertyCell* cell) {
- ExtractCellReferences(entry, cell);
- SetInternalReference(cell, entry, "type", cell->type(),
- PropertyCell::kTypeOffset);
+ SetInternalReference(cell, entry, "value", cell->value(),
+ PropertyCell::kValueOffset);
MarkAsWeakContainer(cell->dependent_code());
SetInternalReference(cell, entry, "dependent_code", cell->dependent_code(),
PropertyCell::kDependentCodeOffset);
@@ -2750,6 +2748,11 @@ void HeapSnapshotJSONSerializer::SerializeImpl() {
if (writer_->aborted()) return;
writer_->AddString("],\n");
+ writer_->AddString("\"samples\":[");
+ SerializeSamples();
+ if (writer_->aborted()) return;
+ writer_->AddString("],\n");
+
writer_->AddString("\"strings\":[");
SerializeStrings();
if (writer_->aborted()) return;
@@ -2885,12 +2888,7 @@ void HeapSnapshotJSONSerializer::SerializeNodes() {
void HeapSnapshotJSONSerializer::SerializeSnapshot() {
- writer_->AddString("\"title\":\"");
- writer_->AddString(snapshot_->title());
- writer_->AddString("\"");
- writer_->AddString(",\"uid\":");
- writer_->AddNumber(snapshot_->uid());
- writer_->AddString(",\"meta\":");
+ writer_->AddString("\"meta\":");
// The object describing node serialization layout.
// We use a set of macros to improve readability.
#define JSON_A(s) "[" s "]"
@@ -2951,7 +2949,10 @@ void HeapSnapshotJSONSerializer::SerializeSnapshot() {
JSON_S("function_info_index") ","
JSON_S("count") ","
JSON_S("size") ","
- JSON_S("children"))));
+ JSON_S("children")) ","
+ JSON_S("sample_fields") ":" JSON_A(
+ JSON_S("timestamp_us") ","
+ JSON_S("last_assigned_id"))));
#undef JSON_S
#undef JSON_O
#undef JSON_A
@@ -3040,13 +3041,10 @@ void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
EmbeddedVector<char, kBufferSize> buffer;
const List<AllocationTracker::FunctionInfo*>& list =
tracker->function_info_list();
- bool first_entry = true;
for (int i = 0; i < list.length(); i++) {
AllocationTracker::FunctionInfo* info = list[i];
int buffer_pos = 0;
- if (first_entry) {
- first_entry = false;
- } else {
+ if (i > 0) {
buffer[buffer_pos++] = ',';
}
buffer_pos = utoa(info->function_id, buffer, buffer_pos);
@@ -3069,6 +3067,34 @@ void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
}
+void HeapSnapshotJSONSerializer::SerializeSamples() {
+ const List<HeapObjectsMap::TimeInterval>& samples =
+ snapshot_->profiler()->heap_object_map()->samples();
+ if (samples.is_empty()) return;
+ base::TimeTicks start_time = samples[0].timestamp;
+ // The buffer needs space for 2 unsigned ints, 2 commas, \n and \0
+ const int kBufferSize = MaxDecimalDigitsIn<sizeof(
+ base::TimeDelta().InMicroseconds())>::kUnsigned +
+ MaxDecimalDigitsIn<sizeof(samples[0].id)>::kUnsigned +
+ 2 + 1 + 1;
+ EmbeddedVector<char, kBufferSize> buffer;
+ for (int i = 0; i < samples.length(); i++) {
+ HeapObjectsMap::TimeInterval& sample = samples[i];
+ int buffer_pos = 0;
+ if (i > 0) {
+ buffer[buffer_pos++] = ',';
+ }
+ base::TimeDelta time_delta = sample.timestamp - start_time;
+ buffer_pos = utoa(time_delta.InMicroseconds(), buffer, buffer_pos);
+ buffer[buffer_pos++] = ',';
+ buffer_pos = utoa(sample.last_assigned_id(), buffer, buffer_pos);
+ buffer[buffer_pos++] = '\n';
+ buffer[buffer_pos++] = '\0';
+ writer_->AddString(buffer.start());
+ }
+}
+
+
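Together with the "sample_fields" meta entry added to SerializeSnapshot() above, the serializer now emits the samples as flat (timestamp_us, last_assigned_id) pairs. An illustrative fragment of the resulting JSON, with invented numbers and line breaks elided, would be:

"samples":[0,131071,16000,131327],
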
void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
writer_->AddCharacter('\n');
writer_->AddCharacter('\"');
diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h
index 8aef43739c..5859eb88b5 100644
--- a/deps/v8/src/heap-snapshot-generator.h
+++ b/deps/v8/src/heap-snapshot-generator.h
@@ -5,7 +5,7 @@
#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_
#define V8_HEAP_SNAPSHOT_GENERATOR_H_
-#include "src/profile-generator-inl.h"
+#include "src/strings-storage.h"
namespace v8 {
namespace internal {
@@ -141,14 +141,10 @@ class HeapEntry BASE_EMBEDDED {
// HeapSnapshotGenerator fills in a HeapSnapshot.
class HeapSnapshot {
public:
- HeapSnapshot(HeapProfiler* profiler,
- const char* title,
- unsigned uid);
+ explicit HeapSnapshot(HeapProfiler* profiler);
void Delete();
HeapProfiler* profiler() { return profiler_; }
- const char* title() { return title_; }
- unsigned uid() { return uid_; }
size_t RawSnapshotSize() const;
HeapEntry* root() { return &entries_[root_index_]; }
HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
@@ -181,8 +177,6 @@ class HeapSnapshot {
HeapEntry* AddGcSubrootEntry(int tag, SnapshotObjectId id);
HeapProfiler* profiler_;
- const char* title_;
- unsigned uid_;
int root_index_;
int gc_roots_index_;
int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
@@ -200,6 +194,16 @@ class HeapSnapshot {
class HeapObjectsMap {
public:
+ struct TimeInterval {
+ explicit TimeInterval(SnapshotObjectId id)
+ : id(id), size(0), count(0), timestamp(base::TimeTicks::Now()) {}
+ SnapshotObjectId last_assigned_id() const { return id - kObjectIdStep; }
+ SnapshotObjectId id;
+ uint32_t size;
+ uint32_t count;
+ base::TimeTicks timestamp;
+ };
+
explicit HeapObjectsMap(Heap* heap);
Heap* heap() const { return heap_; }
@@ -215,7 +219,9 @@ class HeapObjectsMap {
}
void StopHeapObjectsTracking();
- SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+ SnapshotObjectId PushHeapObjectsStats(OutputStream* stream,
+ int64_t* timestamp_us);
+ const List<TimeInterval>& samples() const { return time_intervals_; }
size_t GetUsedMemorySize() const;
SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
@@ -242,12 +248,6 @@ class HeapObjectsMap {
unsigned int size;
bool accessed;
};
- struct TimeInterval {
- explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { }
- SnapshotObjectId id;
- uint32_t size;
- uint32_t count;
- };
SnapshotObjectId next_id_;
HashMap entries_map_;
@@ -590,6 +590,7 @@ class HeapSnapshotJSONSerializer {
void SerializeTraceTree();
void SerializeTraceNode(AllocationTraceNode* node);
void SerializeTraceNodeInfos();
+ void SerializeSamples();
void SerializeString(const unsigned char* s);
void SerializeStrings();
diff --git a/deps/v8/src/heap/gc-idle-time-handler.cc b/deps/v8/src/heap/gc-idle-time-handler.cc
index ff2a559dd5..5060bc90d1 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.cc
+++ b/deps/v8/src/heap/gc-idle-time-handler.cc
@@ -13,9 +13,10 @@ const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
-const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
+const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 2;
const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
+const size_t GCIdleTimeHandler::kMinTimeForOverApproximatingWeakClosureInMs = 1;
void GCIdleTimeAction::Print() {
@@ -116,7 +117,7 @@ bool GCIdleTimeHandler::ShouldDoScavenge(
size_t scavenge_speed_in_bytes_per_ms,
size_t new_space_allocation_throughput_in_bytes_per_ms) {
size_t new_space_allocation_limit =
- kMaxFrameRenderingIdleTime * scavenge_speed_in_bytes_per_ms;
+ kMaxScheduledIdleTime * scavenge_speed_in_bytes_per_ms;
// If the limit is larger than the new space size, then scavenging used to be
// really fast. We can take advantage of the whole new space.
@@ -132,8 +133,7 @@ bool GCIdleTimeHandler::ShouldDoScavenge(
} else {
// We have to trigger scavenge before we reach the end of new space.
new_space_allocation_limit -=
- new_space_allocation_throughput_in_bytes_per_ms *
- kMaxFrameRenderingIdleTime;
+ new_space_allocation_throughput_in_bytes_per_ms * kMaxScheduledIdleTime;
}
if (scavenge_speed_in_bytes_per_ms == 0) {
@@ -153,9 +153,10 @@ bool GCIdleTimeHandler::ShouldDoScavenge(
bool GCIdleTimeHandler::ShouldDoMarkCompact(
size_t idle_time_in_ms, size_t size_of_objects,
size_t mark_compact_speed_in_bytes_per_ms) {
- return idle_time_in_ms >=
- EstimateMarkCompactTime(size_of_objects,
- mark_compact_speed_in_bytes_per_ms);
+ return idle_time_in_ms >= kMaxScheduledIdleTime &&
+ idle_time_in_ms >=
+ EstimateMarkCompactTime(size_of_objects,
+ mark_compact_speed_in_bytes_per_ms);
}
@@ -176,11 +177,29 @@ bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
}
+bool GCIdleTimeHandler::ShouldDoOverApproximateWeakClosure(
+ size_t idle_time_in_ms) {
+ // TODO(jochen): Estimate the time it will take to build the object groups.
+ return idle_time_in_ms >= kMinTimeForOverApproximatingWeakClosureInMs;
+}
+
+
+GCIdleTimeAction GCIdleTimeHandler::NothingOrDone() {
+ if (idle_times_which_made_no_progress_since_last_idle_round_ >=
+ kMaxNoProgressIdleTimesPerIdleRound) {
+ return GCIdleTimeAction::Done();
+ } else {
+ idle_times_which_made_no_progress_since_last_idle_round_++;
+ return GCIdleTimeAction::Nothing();
+ }
+}
+
+
// The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
// a full GC.
-// (2) If the new space is almost full and we can affort a Scavenge or if the
+// (2) If the new space is almost full and we can afford a Scavenge or if the
// next Scavenge will very likely take long, then a Scavenge is performed.
// (3) If there is currently no MarkCompact idle round going on, we start a
// new idle round if enough garbage was created. Otherwise we do not perform
@@ -229,33 +248,23 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
if (ShouldDoMarkCompact(static_cast<size_t>(idle_time_in_ms),
heap_state.size_of_objects,
heap_state.mark_compact_speed_in_bytes_per_ms)) {
- // If there are no more than two GCs left in this idle round and we are
- // allowed to do a full GC, then make those GCs full in order to compact
- // the code space.
- // TODO(ulan): Once we enable code compaction for incremental marking, we
- // can get rid of this special case and always start incremental marking.
- int remaining_mark_sweeps =
- kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
- if (static_cast<size_t>(idle_time_in_ms) > kMaxFrameRenderingIdleTime &&
- (remaining_mark_sweeps <= 2 ||
- !heap_state.can_start_incremental_marking)) {
- return GCIdleTimeAction::FullGC();
- }
- }
- if (!heap_state.can_start_incremental_marking) {
- return GCIdleTimeAction::Nothing();
+ return GCIdleTimeAction::FullGC();
}
}
+
// TODO(hpayer): Estimate finalize sweeping time.
- if (heap_state.sweeping_in_progress &&
- static_cast<size_t>(idle_time_in_ms) >= kMinTimeForFinalizeSweeping) {
- return GCIdleTimeAction::FinalizeSweeping();
+ if (heap_state.sweeping_in_progress) {
+ if (static_cast<size_t>(idle_time_in_ms) >= kMinTimeForFinalizeSweeping) {
+ return GCIdleTimeAction::FinalizeSweeping();
+ }
+ return NothingOrDone();
}
if (heap_state.incremental_marking_stopped &&
!heap_state.can_start_incremental_marking) {
- return GCIdleTimeAction::Nothing();
+ return NothingOrDone();
}
+
size_t step_size = EstimateMarkingStepSize(
static_cast<size_t>(kIncrementalMarkingStepTimeInMs),
heap_state.incremental_marking_speed_in_bytes_per_ms);
diff --git a/deps/v8/src/heap/gc-idle-time-handler.h b/deps/v8/src/heap/gc-idle-time-handler.h
index 6e7b710c6a..a9b9d76bde 100644
--- a/deps/v8/src/heap/gc-idle-time-handler.h
+++ b/deps/v8/src/heap/gc-idle-time-handler.h
@@ -122,13 +122,13 @@ class GCIdleTimeHandler {
// Number of scavenges that will trigger start of new idle round.
static const int kIdleScavengeThreshold;
- // That is the maximum idle time we will have during frame rendering.
- static const size_t kMaxFrameRenderingIdleTime = 16;
+ // This is the maximum scheduled idle time. Note that it can be more than
+ // 16.66 ms when there is currently no rendering going on.
+ static const size_t kMaxScheduledIdleTime = 50;
- // Minimum idle time to start incremental marking.
- static const size_t kMinIdleTimeToStartIncrementalMarking = 10;
+ // The maximum idle time when frames are rendered is 16.66ms.
+ static const size_t kMaxFrameRenderingIdleTime = 17;
- // If we haven't recorded any scavenger events yet, we use a conservative
// lower bound for the scavenger speed.
static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
@@ -138,6 +138,13 @@ class GCIdleTimeHandler {
// Incremental marking step time.
static const size_t kIncrementalMarkingStepTimeInMs = 1;
+ static const size_t kMinTimeForOverApproximatingWeakClosureInMs;
+
+  // Number of times we will return a Nothing action per idle round, despite
+  // having idle time available, before we return a Done action, ensuring we
+  // don't keep scheduling idle tasks while making no progress.
+ static const int kMaxNoProgressIdleTimesPerIdleRound = 10;
+
class HeapState {
public:
void Print();
@@ -159,7 +166,8 @@ class GCIdleTimeHandler {
GCIdleTimeHandler()
: mark_compacts_since_idle_round_started_(0),
- scavenges_since_last_idle_round_(0) {}
+ scavenges_since_last_idle_round_(0),
+ idle_times_which_made_no_progress_since_last_idle_round_(0) {}
GCIdleTimeAction Compute(double idle_time_in_ms, HeapState heap_state);
@@ -195,13 +203,20 @@ class GCIdleTimeHandler {
size_t idle_time_in_ms, size_t size_of_objects,
size_t final_incremental_mark_compact_speed_in_bytes_per_ms);
+ static bool ShouldDoOverApproximateWeakClosure(size_t idle_time_in_ms);
+
static bool ShouldDoScavenge(
size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
size_t scavenger_speed_in_bytes_per_ms,
size_t new_space_allocation_throughput_in_bytes_per_ms);
private:
- void StartIdleRound() { mark_compacts_since_idle_round_started_ = 0; }
+ GCIdleTimeAction NothingOrDone();
+
+ void StartIdleRound() {
+ mark_compacts_since_idle_round_started_ = 0;
+ idle_times_which_made_no_progress_since_last_idle_round_ = 0;
+ }
bool IsMarkCompactIdleRoundFinished() {
return mark_compacts_since_idle_round_started_ ==
kMaxMarkCompactsInIdleRound;
@@ -212,6 +227,7 @@ class GCIdleTimeHandler {
int mark_compacts_since_idle_round_started_;
int scavenges_since_last_idle_round_;
+ int idle_times_which_made_no_progress_since_last_idle_round_;
DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
};
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index e81829c38d..f18d0896ac 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -8,7 +8,6 @@
#include <cmath>
#include "src/base/platform/platform.h"
-#include "src/cpu-profiler.h"
#include "src/heap/heap.h"
#include "src/heap/store-buffer.h"
#include "src/heap/store-buffer-inl.h"
@@ -199,8 +198,6 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
} else if (CELL_SPACE == space) {
allocation = cell_space_->AllocateRaw(size_in_bytes);
- } else if (PROPERTY_CELL_SPACE == space) {
- allocation = property_cell_space_->AllocateRaw(size_in_bytes);
} else {
DCHECK(MAP_SPACE == space);
allocation = map_space_->AllocateRaw(size_in_bytes);
@@ -242,13 +239,9 @@ void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
heap_profiler->ObjectMoveEvent(source->address(), target->address(),
size_in_bytes);
}
-
- if (isolate_->logger()->is_logging_code_events() ||
- isolate_->cpu_profiler()->is_profiling()) {
- if (target->IsSharedFunctionInfo()) {
- PROFILE(isolate_, SharedFunctionInfoMoveEvent(source->address(),
- target->address()));
- }
+ if (target->IsSharedFunctionInfo()) {
+ LOG_CODE_EVENT(isolate_, SharedFunctionInfoMoveEvent(source->address(),
+ target->address()));
}
if (FLAG_verify_predictable) {
@@ -400,7 +393,6 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
DCHECK(type != CODE_TYPE);
DCHECK(type != ODDBALL_TYPE);
DCHECK(type != CELL_TYPE);
- DCHECK(type != PROPERTY_CELL_TYPE);
if (type <= LAST_NAME_TYPE) {
if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
@@ -448,7 +440,6 @@ bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
return dst == src && type == CODE_TYPE;
case MAP_SPACE:
case CELL_SPACE:
- case PROPERTY_CELL_SPACE:
case LO_SPACE:
return false;
}
@@ -556,6 +547,8 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
if (first_word.IsForwardingAddress()) {
HeapObject* dest = first_word.ToForwardingAddress();
DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
+ // TODO(jochen): Remove again after fixing http://crbug.com/452095
+ CHECK((*p)->IsHeapObject() && dest->IsHeapObject());
*p = dest;
return;
}
@@ -707,18 +700,12 @@ void Heap::CompletelyClearInstanceofCache() {
AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
: heap_(isolate->heap()), daf_(isolate) {
- // We shouldn't hit any nested scopes, because that requires
- // non-handle code to call handle code. The code still works but
- // performance will degrade, so we want to catch this situation
- // in debug mode.
- DCHECK(heap_->always_allocate_scope_depth_ == 0);
heap_->always_allocate_scope_depth_++;
}
AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_depth_--;
- DCHECK(heap_->always_allocate_scope_depth_ == 0);
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 002b8cee30..157024939d 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -25,11 +25,11 @@
#include "src/heap/store-buffer.h"
#include "src/heap-profiler.h"
#include "src/isolate-inl.h"
-#include "src/natives.h"
#include "src/runtime-profiler.h"
#include "src/scopeinfo.h"
-#include "src/serialize.h"
-#include "src/snapshot.h"
+#include "src/snapshot/natives.h"
+#include "src/snapshot/serialize.h"
+#include "src/snapshot/snapshot.h"
#include "src/utils.h"
#include "src/v8threads.h"
#include "src/vm-state-inl.h"
@@ -89,7 +89,6 @@ Heap::Heap()
code_space_(NULL),
map_space_(NULL),
cell_space_(NULL),
- property_cell_space_(NULL),
lo_space_(NULL),
gc_state_(NOT_IN_GC),
gc_post_processing_depth_(0),
@@ -144,7 +143,10 @@ Heap::Heap()
external_string_table_(this),
chunks_queued_for_free_(NULL),
gc_callbacks_depth_(0),
- deserialization_complete_(false) {
+ deserialization_complete_(false),
+ concurrent_sweeping_enabled_(false),
+ migration_failure_(false),
+ previous_migration_failure_(false) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -174,18 +176,24 @@ intptr_t Heap::Capacity() {
return new_space_.Capacity() + old_pointer_space_->Capacity() +
old_data_space_->Capacity() + code_space_->Capacity() +
- map_space_->Capacity() + cell_space_->Capacity() +
- property_cell_space_->Capacity();
+ map_space_->Capacity() + cell_space_->Capacity();
}
-intptr_t Heap::CommittedMemory() {
+intptr_t Heap::CommittedOldGenerationMemory() {
if (!HasBeenSetUp()) return 0;
- return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
+ return old_pointer_space_->CommittedMemory() +
old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
- property_cell_space_->CommittedMemory() + lo_space_->Size();
+ lo_space_->Size();
+}
+
+
+intptr_t Heap::CommittedMemory() {
+ if (!HasBeenSetUp()) return 0;
+
+ return new_space_.CommittedMemory() + CommittedOldGenerationMemory();
}
@@ -198,7 +206,6 @@ size_t Heap::CommittedPhysicalMemory() {
code_space_->CommittedPhysicalMemory() +
map_space_->CommittedPhysicalMemory() +
cell_space_->CommittedPhysicalMemory() +
- property_cell_space_->CommittedPhysicalMemory() +
lo_space_->CommittedPhysicalMemory();
}
@@ -225,15 +232,14 @@ intptr_t Heap::Available() {
return new_space_.Available() + old_pointer_space_->Available() +
old_data_space_->Available() + code_space_->Available() +
- map_space_->Available() + cell_space_->Available() +
- property_cell_space_->Available();
+ map_space_->Available() + cell_space_->Available();
}
bool Heap::HasBeenSetUp() {
return old_pointer_space_ != NULL && old_data_space_ != NULL &&
code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
- property_cell_space_ != NULL && lo_space_ != NULL;
+ lo_space_ != NULL;
}
@@ -373,14 +379,6 @@ void Heap::PrintShortHeapStatistics() {
", committed: %6" V8_PTR_PREFIX "d KB\n",
cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
cell_space_->CommittedMemory() / KB);
- PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
- "d KB"
- ", available: %6" V8_PTR_PREFIX
- "d KB"
- ", committed: %6" V8_PTR_PREFIX "d KB\n",
- property_cell_space_->SizeOfObjects() / KB,
- property_cell_space_->Available() / KB,
- property_cell_space_->CommittedMemory() / KB);
PrintPID("Large object space, used: %6" V8_PTR_PREFIX
"d KB"
", available: %6" V8_PTR_PREFIX
@@ -669,9 +667,6 @@ void Heap::GarbageCollectionEpilogue() {
isolate_->counters()->heap_fraction_cell_space()->AddSample(
static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
CommittedMemory()));
- isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
- static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
- CommittedMemory()));
isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
(lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
@@ -683,10 +678,6 @@ void Heap::GarbageCollectionEpilogue() {
static_cast<int>(map_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
static_cast<int>(cell_space()->CommittedMemory() / KB));
- isolate_->counters()
- ->heap_sample_property_cell_space_committed()
- ->AddSample(
- static_cast<int>(property_cell_space()->CommittedMemory() / KB));
isolate_->counters()->heap_sample_code_space_committed()->AddSample(
static_cast<int>(code_space()->CommittedMemory() / KB));
@@ -718,7 +709,6 @@ void Heap::GarbageCollectionEpilogue() {
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
- UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
#undef UPDATE_COUNTERS_FOR_SPACE
#undef UPDATE_FRAGMENTATION_FOR_SPACE
@@ -731,6 +721,13 @@ void Heap::GarbageCollectionEpilogue() {
// Remember the last top pointer so that we can later find out
// whether we allocated in new space since the last GC.
new_space_top_after_last_gc_ = new_space()->top();
+
+ if (migration_failure_) {
+ set_previous_migration_failure(true);
+ } else {
+ set_previous_migration_failure(false);
+ }
+ set_migration_failure(false);
}
@@ -741,8 +738,9 @@ void Heap::HandleGCRequest() {
return;
}
DCHECK(FLAG_overapproximate_weak_closure);
- DCHECK(!incremental_marking()->weak_closure_was_overapproximated());
- OverApproximateWeakClosure("GC interrupt");
+ if (!incremental_marking()->weak_closure_was_overapproximated()) {
+ OverApproximateWeakClosure("GC interrupt");
+ }
}
@@ -751,6 +749,10 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
PrintF("[IncrementalMarking] Overapproximate weak closure (%s).\n",
gc_reason);
}
+
+ GCTracer::Scope gc_scope(tracer(),
+ GCTracer::Scope::MC_INCREMENTAL_WEAKCLOSURE);
+
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
@@ -761,9 +763,7 @@ void Heap::OverApproximateWeakClosure(const char* gc_reason) {
CallGCPrologueCallbacks(kGCTypeMarkSweepCompact, kNoGCCallbackFlags);
}
}
- mark_compact_collector()->OverApproximateWeakClosure();
- incremental_marking()->set_should_hurry(false);
- incremental_marking()->set_weak_closure_was_overapproximated(true);
+ incremental_marking()->MarkObjectGroups();
{
GCCallbacksScope scope(this);
if (scope.CheckReenter()) {
@@ -805,6 +805,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
DisallowHeapAllocation no_recursive_gc;
isolate()->optimizing_compiler_thread()->Flush();
}
+ isolate()->ClearSerializerData();
mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
kReduceMemoryFootprintMask);
isolate_->compilation_cache()->Clear();
@@ -829,10 +830,14 @@ void Heap::EnsureFillerObjectAtTop() {
// pointer of the new space page. We store a filler object there to
// identify the unused space.
Address from_top = new_space_.top();
- Address from_limit = new_space_.limit();
- if (from_top < from_limit) {
- int remaining_in_page = static_cast<int>(from_limit - from_top);
- CreateFillerObjectAt(from_top, remaining_in_page);
+ // Check that from_top is inside its page (i.e., not at the end).
+ Address space_end = new_space_.ToSpaceEnd();
+ if (from_top < space_end) {
+ Page* page = Page::FromAddress(from_top);
+ if (page->Contains(from_top)) {
+ int remaining_in_page = static_cast<int>(page->area_end() - from_top);
+ CreateFillerObjectAt(from_top, remaining_in_page);
+ }
}
}
@@ -923,6 +928,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
isolate()->optimizing_compiler_thread()->Flush();
}
AgeInlineCaches();
+ set_retained_maps(ArrayList::cast(empty_fixed_array()));
tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
return ++contexts_disposed_;
}
@@ -1365,10 +1371,10 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
object = code_it.Next())
object->Iterate(&v);
- HeapObjectIterator data_it(heap->old_data_space());
- for (HeapObject* object = data_it.Next(); object != NULL;
- object = data_it.Next())
- object->Iterate(&v);
+ HeapObjectIterator data_it(heap->old_data_space());
+ for (HeapObject* object = data_it.Next(); object != NULL;
+ object = data_it.Next())
+ object->Iterate(&v);
}
#endif // VERIFY_HEAP
@@ -1515,6 +1521,10 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
+ // There are soft limits in the allocation code, designed to trigger a mark
+ // sweep collection by failing allocations. There is no sense in trying to
+ // trigger one during scavenge: scavenges allocation should always succeed.
+  // trigger one during scavenge: scavenge allocations should always succeed.
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
@@ -1560,10 +1570,6 @@ void Heap::Scavenge() {
Address new_space_front = new_space_.ToSpaceStart();
promotion_queue_.Initialize();
-#ifdef DEBUG
- store_buffer()->Clean();
-#endif
-
ScavengeVisitor scavenge_visitor(this);
// Copy roots.
IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
@@ -1587,21 +1593,6 @@ void Heap::Scavenge() {
}
}
- // Copy objects reachable from global property cells by scavenging global
- // property cell values directly.
- HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
- for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
- heap_object != NULL;
- heap_object = js_global_property_cell_iterator.Next()) {
- if (heap_object->IsPropertyCell()) {
- PropertyCell* cell = PropertyCell::cast(heap_object);
- Address value_address = cell->ValueAddress();
- scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
- Address type_address = cell->TypeAddress();
- scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
- }
- }
-
// Copy objects reachable from the encountered weak collections list.
scavenge_visitor.VisitPointer(&encountered_weak_collections_);
// Copy objects reachable from the encountered weak cells.
@@ -1730,29 +1721,63 @@ void Heap::UpdateReferencesInExternalStringTable(
void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) {
- ProcessArrayBuffers(retainer);
+ ProcessArrayBuffers(retainer, false);
+ ProcessNewArrayBufferViews(retainer);
ProcessNativeContexts(retainer);
ProcessAllocationSites(retainer);
}
void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) {
- ProcessArrayBuffers(retainer);
+ ProcessArrayBuffers(retainer, true);
+ ProcessNewArrayBufferViews(retainer);
ProcessNativeContexts(retainer);
}
void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
- Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
+ Object* head =
+ VisitWeakList<Context>(this, native_contexts_list(), retainer, false);
// Update the head of the list of contexts.
set_native_contexts_list(head);
}
-void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
- Object* array_buffer_obj =
- VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
+void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer,
+ bool stop_after_young) {
+ Object* array_buffer_obj = VisitWeakList<JSArrayBuffer>(
+ this, array_buffers_list(), retainer, stop_after_young);
set_array_buffers_list(array_buffer_obj);
+
+#ifdef DEBUG
+ // Verify invariant that young array buffers come before old array buffers
+ // in array buffers list if there was no promotion failure.
+ Object* undefined = undefined_value();
+ Object* next = array_buffers_list();
+ bool old_objects_recorded = false;
+ if (migration_failure()) return;
+ while (next != undefined) {
+ if (!old_objects_recorded) {
+ old_objects_recorded = !InNewSpace(next);
+ }
+ DCHECK((InNewSpace(next) && !old_objects_recorded) || !InNewSpace(next));
+ next = JSArrayBuffer::cast(next)->weak_next();
+ }
+#endif
+}
+
+
+void Heap::ProcessNewArrayBufferViews(WeakObjectRetainer* retainer) {
+ // Retain the list of new space views.
+ Object* typed_array_obj = VisitWeakList<JSArrayBufferView>(
+ this, new_array_buffer_views_list_, retainer, false);
+ set_new_array_buffer_views_list(typed_array_obj);
+
+ // Some objects in the list may be in old space now. Find them
+ // and move them to the corresponding array buffer.
+ Object* view = VisitNewArrayBufferViewsWeakList(
+ this, new_array_buffer_views_list_, retainer);
+ set_new_array_buffer_views_list(view);
}
@@ -1768,8 +1793,8 @@ void Heap::TearDownArrayBuffers() {
void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
- Object* allocation_site_obj =
- VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
+ Object* allocation_site_obj = VisitWeakList<AllocationSite>(
+ this, allocation_sites_list(), retainer, false);
set_allocation_sites_list(allocation_site_obj);
}
@@ -1883,6 +1908,18 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// to new space.
DCHECK(!target->IsMap());
Address obj_address = target->address();
+
+ // We are not collecting slots on new space objects during mutation;
+ // thus we have to scan for pointers to evacuation candidates when we
+ // promote objects. But we should not record any slots in non-black
+ // objects. Grey objects' slots would be rescanned.
+ // White objects might not survive until the end of the collection;
+ // it would be a violation of the invariant to record their slots.
+ bool record_slots = false;
+ if (incremental_marking()->IsCompacting()) {
+ MarkBit mark_bit = Marking::MarkBitFrom(target);
+ record_slots = Marking::IsBlack(mark_bit);
+ }
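// Editor's note (hedged sketch): record_slots is now threaded into
// IterateAndMarkPointersToFromSpace (see the hunk further below), which
// previously recomputed it per call. Inside that loop the flag gates slot
// recording for the compacting collector, approximately:
//
//   if (record_slots && heap->InNewSpace(*slot)) {
//     // call shape is an assumption, not verbatim from this patch
//     mark_compact_collector()->RecordSlot(slot, slot, *slot);
//   }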
#if V8_DOUBLE_FIELDS_UNBOXING
LayoutDescriptorHelper helper(target->map());
bool has_only_tagged_fields = helper.all_fields_tagged();
@@ -1892,15 +1929,15 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
int end_of_region_offset;
if (helper.IsTagged(offset, size, &end_of_region_offset)) {
IterateAndMarkPointersToFromSpace(
- obj_address + offset, obj_address + end_of_region_offset,
- &ScavengeObject);
+ record_slots, obj_address + offset,
+ obj_address + end_of_region_offset, &ScavengeObject);
}
offset = end_of_region_offset;
}
} else {
#endif
- IterateAndMarkPointersToFromSpace(obj_address, obj_address + size,
- &ScavengeObject);
+ IterateAndMarkPointersToFromSpace(
+ record_slots, obj_address, obj_address + size, &ScavengeObject);
#if V8_DOUBLE_FIELDS_UNBOXING
}
#endif
@@ -2111,12 +2148,10 @@ class ScavengingVisitor : public StaticVisitorBase {
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
}
+ MigrateObject(heap, object, target, object_size);
- // Order is important: slot might be inside of the target if target
- // was allocated over a dead object and slot comes from the store
- // buffer.
+ // Update slot to new target.
*slot = target;
- MigrateObject(heap, object, target, object_size);
heap->IncrementSemiSpaceCopiedObjectSize(object_size);
return true;
@@ -2150,23 +2185,11 @@ class ScavengingVisitor : public StaticVisitorBase {
if (alignment != kObjectAlignment) {
target = EnsureDoubleAligned(heap, target, allocation_size);
}
-
- // Order is important: slot might be inside of the target if target
- // was allocated over a dead object and slot comes from the store
- // buffer.
-
- // Unfortunately, the allocation can also write over the slot if the slot
- // was in free space and the allocation wrote free list data (such as the
- // free list map or entry size) over the slot. We guard against this by
- // checking that the slot still points to the object being moved. This
- // should be sufficient because neither the free list map nor the free
- // list entry size should look like a new space pointer (the former is an
- // old space pointer, the latter is word-aligned).
- if (*slot == object) {
- *slot = target;
- }
MigrateObject(heap, object, target, object_size);
+ // Update slot to new target.
+ *slot = target;
+
if (object_contents == POINTER_OBJECT) {
if (map->instance_type() == JS_FUNCTION_TYPE) {
heap->promotion_queue()->insert(target,
@@ -2195,6 +2218,7 @@ class ScavengingVisitor : public StaticVisitorBase {
if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
return;
}
+ heap->set_migration_failure(true);
}
if (PromoteObject<object_contents, alignment>(map, slot, object,
@@ -2409,6 +2433,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
MapWord first_word = object->map_word();
SLOW_DCHECK(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
+ // TODO(jochen): Remove again after fixing http://crbug.com/452095
+ CHECK((*p)->IsHeapObject() == object->IsHeapObject());
map->GetHeap()->DoScavengeObject(map, p, object);
}
@@ -2450,6 +2476,7 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Map::OwnsDescriptors::encode(true) |
Map::Counter::encode(Map::kRetainingCounterStart);
reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
+ reinterpret_cast<Map*>(result)->set_weak_cell_cache(Smi::FromInt(0));
return result;
}
@@ -2465,14 +2492,15 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
Map* map = Map::cast(result);
map->set_instance_type(instance_type);
map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
- map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
+ map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER);
map->set_instance_size(instance_size);
map->set_inobject_properties(0);
map->set_pre_allocated_property_fields(0);
map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
- map->init_back_pointer(undefined_value());
+ map->set_weak_cell_cache(Smi::FromInt(0));
+ map->set_raw_transitions(Smi::FromInt(0));
map->set_unused_property_fields(0);
map->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
@@ -2606,7 +2634,7 @@ bool Heap::CreateInitialMaps() {
// Fix the instance_descriptors for the existing maps.
meta_map()->set_code_cache(empty_fixed_array());
meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- meta_map()->init_back_pointer(undefined_value());
+ meta_map()->set_raw_transitions(Smi::FromInt(0));
meta_map()->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -2615,7 +2643,7 @@ bool Heap::CreateInitialMaps() {
fixed_array_map()->set_code_cache(empty_fixed_array());
fixed_array_map()->set_dependent_code(
DependentCode::cast(empty_fixed_array()));
- fixed_array_map()->init_back_pointer(undefined_value());
+ fixed_array_map()->set_raw_transitions(Smi::FromInt(0));
fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
fixed_array_map()->set_layout_descriptor(
@@ -2624,7 +2652,7 @@ bool Heap::CreateInitialMaps() {
undefined_map()->set_code_cache(empty_fixed_array());
undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- undefined_map()->init_back_pointer(undefined_value());
+ undefined_map()->set_raw_transitions(Smi::FromInt(0));
undefined_map()->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
undefined_map()->set_layout_descriptor(
@@ -2633,7 +2661,7 @@ bool Heap::CreateInitialMaps() {
null_map()->set_code_cache(empty_fixed_array());
null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
- null_map()->init_back_pointer(undefined_value());
+ null_map()->set_raw_transitions(Smi::FromInt(0));
null_map()->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
@@ -2642,7 +2670,7 @@ bool Heap::CreateInitialMaps() {
constant_pool_array_map()->set_code_cache(empty_fixed_array());
constant_pool_array_map()->set_dependent_code(
DependentCode::cast(empty_fixed_array()));
- constant_pool_array_map()->init_back_pointer(undefined_value());
+ constant_pool_array_map()->set_raw_transitions(Smi::FromInt(0));
constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
if (FLAG_unbox_double_fields) {
constant_pool_array_map()->set_layout_descriptor(
@@ -2651,19 +2679,19 @@ bool Heap::CreateInitialMaps() {
// Fix prototype object for existing maps.
meta_map()->set_prototype(null_value());
- meta_map()->set_constructor(null_value());
+ meta_map()->set_constructor_or_backpointer(null_value());
fixed_array_map()->set_prototype(null_value());
- fixed_array_map()->set_constructor(null_value());
+ fixed_array_map()->set_constructor_or_backpointer(null_value());
undefined_map()->set_prototype(null_value());
- undefined_map()->set_constructor(null_value());
+ undefined_map()->set_constructor_or_backpointer(null_value());
null_map()->set_prototype(null_value());
- null_map()->set_constructor(null_value());
+ null_map()->set_constructor_or_backpointer(null_value());
constant_pool_array_map()->set_prototype(null_value());
- constant_pool_array_map()->set_constructor(null_value());
+ constant_pool_array_map()->set_constructor_or_backpointer(null_value());
{ // Map allocation
#define ALLOCATE_MAP(instance_type, size, field_name) \
@@ -2852,7 +2880,7 @@ AllocationResult Heap::AllocatePropertyCell() {
HeapObject* result;
AllocationResult allocation =
- AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
+ AllocateRaw(size, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(global_property_cell_map());
@@ -2860,7 +2888,6 @@ AllocationResult Heap::AllocatePropertyCell() {
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
cell->set_value(the_hole_value());
- cell->set_type(HeapType::None());
return result;
}
@@ -2868,7 +2895,7 @@ AllocationResult Heap::AllocatePropertyCell() {
AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
int size = WeakCell::kSize;
STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
- HeapObject* result;
+ HeapObject* result = NULL;
{
AllocationResult allocation =
AllocateRaw(size, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
@@ -3093,10 +3120,9 @@ void Heap::CreateInitialObjects() {
set_microtask_queue(empty_fixed_array());
if (FLAG_vector_ics) {
- FeedbackVectorSpec spec(0, 1);
- spec.SetKind(0, Code::KEYED_LOAD_IC);
+ FeedbackVectorSpec spec(0, Code::KEYED_LOAD_IC);
Handle<TypeFeedbackVector> dummy_vector =
- factory->NewTypeFeedbackVector(spec);
+ factory->NewTypeFeedbackVector(&spec);
dummy_vector->Set(FeedbackVectorICSlot(0),
*TypeFeedbackVector::MegamorphicSentinel(isolate()),
SKIP_WRITE_BARRIER);
@@ -3106,6 +3132,7 @@ void Heap::CreateInitialObjects() {
}
set_detached_contexts(empty_fixed_array());
+ set_retained_maps(ArrayList::cast(empty_fixed_array()));
set_weak_object_to_code_table(
*WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
@@ -3140,30 +3167,34 @@ void Heap::CreateInitialObjects() {
bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
- RootListIndex writable_roots[] = {
- kStoreBufferTopRootIndex,
- kStackLimitRootIndex,
- kNumberStringCacheRootIndex,
- kInstanceofCacheFunctionRootIndex,
- kInstanceofCacheMapRootIndex,
- kInstanceofCacheAnswerRootIndex,
- kCodeStubsRootIndex,
- kNonMonomorphicCacheRootIndex,
- kPolymorphicCodeCacheRootIndex,
- kLastScriptIdRootIndex,
- kEmptyScriptRootIndex,
- kRealStackLimitRootIndex,
- kArgumentsAdaptorDeoptPCOffsetRootIndex,
- kConstructStubDeoptPCOffsetRootIndex,
- kGetterStubDeoptPCOffsetRootIndex,
- kSetterStubDeoptPCOffsetRootIndex,
- kStringTableRootIndex,
- };
+ switch (root_index) {
+ case kStoreBufferTopRootIndex:
+ case kNumberStringCacheRootIndex:
+ case kInstanceofCacheFunctionRootIndex:
+ case kInstanceofCacheMapRootIndex:
+ case kInstanceofCacheAnswerRootIndex:
+ case kCodeStubsRootIndex:
+ case kNonMonomorphicCacheRootIndex:
+ case kPolymorphicCodeCacheRootIndex:
+ case kEmptyScriptRootIndex:
+ case kSymbolRegistryRootIndex:
+ case kMaterializedObjectsRootIndex:
+ case kAllocationSitesScratchpadRootIndex:
+ case kMicrotaskQueueRootIndex:
+ case kDetachedContextsRootIndex:
+ case kWeakObjectToCodeTableRootIndex:
+ case kRetainedMapsRootIndex:
+// Smi values
+#define SMI_ENTRY(type, name, Name) case k##Name##RootIndex:
+ SMI_ROOT_LIST(SMI_ENTRY)
+#undef SMI_ENTRY
+ // String table
+ case kStringTableRootIndex:
+ return true;
- for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
- if (root_index == writable_roots[i]) return true;
+ default:
+ return false;
}
- return false;
}
@@ -3500,6 +3531,7 @@ void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int elements_to_trim) {
+ DCHECK(!object->IsFixedTypedArrayBase());
const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
const int bytes_to_trim = elements_to_trim * element_size;
Map* map = object->map();
@@ -3555,14 +3587,30 @@ void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
template<Heap::InvocationMode mode>
void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
- const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
- const int bytes_to_trim = elements_to_trim * element_size;
+ const int len = object->length();
+ DCHECK(elements_to_trim < len);
+
+ int bytes_to_trim;
+ if (object->IsFixedTypedArrayBase()) {
+ InstanceType type = object->map()->instance_type();
+ bytes_to_trim =
+ FixedTypedArrayBase::TypedArraySize(type, len) -
+ FixedTypedArrayBase::TypedArraySize(type, len - elements_to_trim);
+ } else {
+ const int element_size =
+ object->IsFixedArray() ? kPointerSize : kDoubleSize;
+ bytes_to_trim = elements_to_trim * element_size;
+ }
// For now this trick is only applied to objects in new and paged space.
DCHECK(object->map() != fixed_cow_array_map());
- const int len = object->length();
- DCHECK(elements_to_trim < len);
+ if (bytes_to_trim == 0) {
+ // No need to create a filler or update live bytes counters; just
+ // initialize the header of the trimmed array.
+ object->synchronized_set_length(len - elements_to_trim);
+ return;
+ }
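// Editor's note (worked example, assuming 8-byte object alignment): for a
// FixedTypedArray<uint8> of length 16, trimming 3 elements leaves the
// aligned size unchanged (TypedArraySize rounds both 16 and 13 data bytes
// up to the same pointer-size multiple), so bytes_to_trim == 0 and only the
// length field is rewritten.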
// Calculate location of new array end.
Address new_end = object->address() + object->Size() - bytes_to_trim;
@@ -3710,7 +3758,7 @@ AllocationResult Heap::CopyCode(Code* code) {
new_constant_pool = empty_constant_pool_array();
}
- HeapObject* result;
+ HeapObject* result = NULL;
// Allocate an object the same size as the code object.
int obj_size = code->Size();
allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
@@ -3850,9 +3898,9 @@ void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
// Pre-allocated fields need to be initialized with undefined_value as well
// so that object accesses before the constructor completes (e.g. in the
// debugger) will not cause a crash.
- if (map->constructor()->IsJSFunction() &&
- JSFunction::cast(map->constructor())
- ->IsInobjectSlackTrackingInProgress()) {
+ Object* constructor = map->GetConstructor();
+ if (constructor->IsJSFunction() &&
+ JSFunction::cast(constructor)->IsInobjectSlackTrackingInProgress()) {
// We might want to shrink the object later.
DCHECK(obj->GetInternalFieldCount() == 0);
filler = Heap::one_pointer_filler_map();
@@ -4439,7 +4487,7 @@ AllocationResult Heap::AllocateExtendedConstantPoolArray(
AllocationResult Heap::AllocateEmptyConstantPoolArray() {
ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
int size = ConstantPoolArray::SizeFor(small);
- HeapObject* result;
+ HeapObject* result = NULL;
{
AllocationResult allocation =
AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
@@ -4455,7 +4503,7 @@ AllocationResult Heap::AllocateSymbol() {
// Statically ensure that it is safe to allocate symbols in paged spaces.
STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
- HeapObject* result;
+ HeapObject* result = NULL;
AllocationResult allocation =
AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
if (!allocation.To(&result)) return allocation;
@@ -4545,11 +4593,20 @@ void Heap::IdleMarkCompact(const char* message) {
bool Heap::TryFinalizeIdleIncrementalMarking(
double idle_time_in_ms, size_t size_of_objects,
size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
- if (incremental_marking()->IsComplete() ||
- (mark_compact_collector_.marking_deque()->IsEmpty() &&
- gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
- static_cast<size_t>(idle_time_in_ms), size_of_objects,
- final_incremental_mark_compact_speed_in_bytes_per_ms))) {
+ if (FLAG_overapproximate_weak_closure &&
+ (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
+ (!incremental_marking()->weak_closure_was_overapproximated() &&
+ mark_compact_collector_.marking_deque()->IsEmpty() &&
+ gc_idle_time_handler_.ShouldDoOverApproximateWeakClosure(
+ static_cast<size_t>(idle_time_in_ms))))) {
+ OverApproximateWeakClosure(
+ "Idle notification: overapproximate weak closure");
+ return true;
+ } else if (incremental_marking()->IsComplete() ||
+ (mark_compact_collector_.marking_deque()->IsEmpty() &&
+ gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
+ static_cast<size_t>(idle_time_in_ms), size_of_objects,
+ final_incremental_mark_compact_speed_in_bytes_per_ms))) {
CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
return true;
}
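// Editor's note (hedged summary of the branch above): the idle handler now
// prefers the cheaper weak-closure overapproximation over a full final
// mark-compact. Roughly:
//
//   if (overapproximation requested, or deque empty and idle budget suffices)
//     -> OverApproximateWeakClosure(...)
//   else if (marking complete, or deque empty and finalize budget suffices)
//     -> CollectAllGarbage(kNoGCFlags, ...)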
@@ -4595,13 +4652,14 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
// TODO(ulan): Start incremental marking only for large heaps.
intptr_t limit = old_generation_allocation_limit_;
if (static_cast<size_t>(idle_time_in_ms) >
- GCIdleTimeHandler::kMinIdleTimeToStartIncrementalMarking) {
+ GCIdleTimeHandler::kMaxFrameRenderingIdleTime) {
limit = idle_old_generation_allocation_limit_;
}
heap_state.can_start_incremental_marking =
incremental_marking()->WorthActivating() &&
- NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking;
+ NextGCIsLikelyToBeFull(limit) && FLAG_incremental_marking &&
+ !mark_compact_collector()->sweeping_in_progress();
heap_state.sweeping_in_progress =
mark_compact_collector()->sweeping_in_progress();
heap_state.mark_compact_speed_in_bytes_per_ms =
@@ -4689,6 +4747,7 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
FLAG_trace_idle_notification_verbose) {
+ PrintPID("%8.0f ms: ", isolate()->time_millis_since_init());
PrintF(
"Idle notification: requested idle time %.2f ms, used idle time %.2f "
"ms, deadline usage %.2f ms [",
@@ -4711,7 +4770,7 @@ bool Heap::IdleNotification(double deadline_in_seconds) {
bool Heap::RecentIdleNotificationHappened() {
return (last_idle_notification_time_ +
- GCIdleTimeHandler::kMaxFrameRenderingIdleTime) >
+ GCIdleTimeHandler::kMaxScheduledIdleTime) >
MonotonicallyIncreasingTimeInMs();
}
@@ -4768,8 +4827,6 @@ void Heap::ReportHeapStatistics(const char* title) {
map_space_->ReportStatistics();
PrintF("Cell space : ");
cell_space_->ReportStatistics();
- PrintF("PropertyCell space : ");
- property_cell_space_->ReportStatistics();
PrintF("Large object space : ");
lo_space_->ReportStatistics();
PrintF(">>>>>> ========================================= >>>>>>\n");
@@ -4787,7 +4844,6 @@ bool Heap::Contains(Address addr) {
old_pointer_space_->Contains(addr) ||
old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
map_space_->Contains(addr) || cell_space_->Contains(addr) ||
- property_cell_space_->Contains(addr) ||
lo_space_->SlowContains(addr));
}
@@ -4814,8 +4870,6 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
return map_space_->Contains(addr);
case CELL_SPACE:
return cell_space_->Contains(addr);
- case PROPERTY_CELL_SPACE:
- return property_cell_space_->Contains(addr);
case LO_SPACE:
return lo_space_->SlowContains(addr);
}
@@ -4864,7 +4918,6 @@ void Heap::Verify() {
old_data_space_->Verify(&no_dirty_regions_visitor);
code_space_->Verify(&no_dirty_regions_visitor);
cell_space_->Verify(&no_dirty_regions_visitor);
- property_cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
}
@@ -4884,22 +4937,11 @@ void Heap::ZapFromSpace() {
}
-void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
+void Heap::IterateAndMarkPointersToFromSpace(bool record_slots, Address start,
+ Address end,
ObjectSlotCallback callback) {
Address slot_address = start;
- // We are not collecting slots on new space objects during mutation
- // thus we have to scan for pointers to evacuation candidates when we
- // promote objects. But we should not record any slots in non-black
- // objects. Grey object's slots would be rescanned.
- // White object might not survive until the end of collection
- // it would be a violation of the invariant to record it's slots.
- bool record_slots = false;
- if (incremental_marking()->IsCompacting()) {
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
- record_slots = Marking::IsBlack(mark_bit);
- }
-
while (slot_address < end) {
Object** slot = reinterpret_cast<Object**>(slot_address);
Object* object = *slot;
@@ -4929,143 +4971,6 @@ void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
}
-#ifdef DEBUG
-typedef bool (*CheckStoreBufferFilter)(Object** addr);
-
-
-bool IsAMapPointerAddress(Object** addr) {
- uintptr_t a = reinterpret_cast<uintptr_t>(addr);
- int mod = a % Map::kSize;
- return mod >= Map::kPointerFieldsBeginOffset &&
- mod < Map::kPointerFieldsEndOffset;
-}
-
-
-bool EverythingsAPointer(Object** addr) { return true; }
-
-
-static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
- Object**** store_buffer_position,
- Object*** store_buffer_top,
- CheckStoreBufferFilter filter,
- Address special_garbage_start,
- Address special_garbage_end) {
- Map* free_space_map = heap->free_space_map();
- for (; current < limit; current++) {
- Object* o = *current;
- Address current_address = reinterpret_cast<Address>(current);
- // Skip free space.
- if (o == free_space_map) {
- Address current_address = reinterpret_cast<Address>(current);
- FreeSpace* free_space =
- FreeSpace::cast(HeapObject::FromAddress(current_address));
- int skip = free_space->Size();
- DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
- DCHECK(skip > 0);
- current_address += skip - kPointerSize;
- current = reinterpret_cast<Object**>(current_address);
- continue;
- }
- // Skip the current linear allocation space between top and limit which is
- // unmarked with the free space map, but can contain junk.
- if (current_address == special_garbage_start &&
- special_garbage_end != special_garbage_start) {
- current_address = special_garbage_end - kPointerSize;
- current = reinterpret_cast<Object**>(current_address);
- continue;
- }
- if (!(*filter)(current)) continue;
- DCHECK(current_address < special_garbage_start ||
- current_address >= special_garbage_end);
- DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
- // We have to check that the pointer does not point into new space
- // without trying to cast it to a heap object since the hash field of
- // a string can contain values like 1 and 3 which are tagged null
- // pointers.
- if (!heap->InNewSpace(o)) continue;
- while (**store_buffer_position < current &&
- *store_buffer_position < store_buffer_top) {
- (*store_buffer_position)++;
- }
- if (**store_buffer_position != current ||
- *store_buffer_position == store_buffer_top) {
- Object** obj_start = current;
- while (!(*obj_start)->IsMap()) obj_start--;
- UNREACHABLE();
- }
- }
-}
-
-
-// Check that the store buffer contains all intergenerational pointers by
-// scanning a page and ensuring that all pointers to young space are in the
-// store buffer.
-void Heap::OldPointerSpaceCheckStoreBuffer() {
- OldSpace* space = old_pointer_space();
- PageIterator pages(space);
-
- store_buffer()->SortUniq();
-
- while (pages.has_next()) {
- Page* page = pages.next();
- Object** current = reinterpret_cast<Object**>(page->area_start());
-
- Address end = page->area_end();
-
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
-
- Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this, current, limit, &store_buffer_position,
- store_buffer_top, &EverythingsAPointer, space->top(),
- space->limit());
- }
-}
-
-
-void Heap::MapSpaceCheckStoreBuffer() {
- MapSpace* space = map_space();
- PageIterator pages(space);
-
- store_buffer()->SortUniq();
-
- while (pages.has_next()) {
- Page* page = pages.next();
- Object** current = reinterpret_cast<Object**>(page->area_start());
-
- Address end = page->area_end();
-
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
-
- Object** limit = reinterpret_cast<Object**>(end);
- CheckStoreBuffer(this, current, limit, &store_buffer_position,
- store_buffer_top, &IsAMapPointerAddress, space->top(),
- space->limit());
- }
-}
-
-
-void Heap::LargeObjectSpaceCheckStoreBuffer() {
- LargeObjectIterator it(lo_space());
- for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
- // We only have code, sequential strings, or fixed arrays in large
- // object space, and only fixed arrays can possibly contain pointers to
- // the young generation.
- if (object->IsFixedArray()) {
- Object*** store_buffer_position = store_buffer()->Start();
- Object*** store_buffer_top = store_buffer()->Top();
- Object** current = reinterpret_cast<Object**>(object->address());
- Object** limit =
- reinterpret_cast<Object**>(object->address() + object->Size());
- CheckStoreBuffer(this, current, limit, &store_buffer_position,
- store_buffer_top, &EverythingsAPointer, NULL, NULL);
- }
- }
-}
-#endif
-
-
void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
@@ -5203,7 +5108,7 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
max_semi_space_size_ = Page::kPageSize;
}
- if (Snapshot::HaveASnapshotToStartFrom()) {
+ if (isolate()->snapshot_available()) {
// If we are using a snapshot we always reserve the default amount
// of memory for each semispace because code in the snapshot has
// write-barrier code that relies on the size and alignment of new
@@ -5326,8 +5231,6 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->map_space_capacity = map_space_->Capacity();
*stats->cell_space_size = cell_space_->SizeOfObjects();
*stats->cell_space_capacity = cell_space_->Capacity();
- *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
- *stats->property_cell_space_capacity = property_cell_space_->Capacity();
*stats->lo_space_size = lo_space_->Size();
isolate_->global_handles()->RecordStats(stats);
*stats->memory_allocator_size = isolate()->memory_allocator()->Size();
@@ -5353,7 +5256,7 @@ intptr_t Heap::PromotedSpaceSizeOfObjects() {
return old_pointer_space_->SizeOfObjects() +
old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
- property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
+ lo_space_->SizeOfObjects();
}
@@ -5473,6 +5376,9 @@ bool Heap::SetUp() {
if (!ConfigureHeapDefault()) return false;
}
+ concurrent_sweeping_enabled_ =
+ FLAG_concurrent_sweeping && isolate_->max_available_threads() > 1;
+
base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
MarkMapPointersAsEncoded(false);
@@ -5518,12 +5424,6 @@ bool Heap::SetUp() {
if (cell_space_ == NULL) return false;
if (!cell_space_->SetUp()) return false;
- // Initialize global property cell space.
- property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
- PROPERTY_CELL_SPACE);
- if (property_cell_space_ == NULL) return false;
- if (!property_cell_space_->SetUp()) return false;
-
// The large object code space may contain code or data. We set the memory
// to be non-executable here for safety, but this means we need to enable it
// explicitly when allocating large code objects.
@@ -5564,6 +5464,7 @@ bool Heap::CreateHeapObjects() {
set_native_contexts_list(undefined_value());
set_array_buffers_list(undefined_value());
+ set_new_array_buffer_views_list(undefined_value());
set_allocation_sites_list(undefined_value());
return true;
}
@@ -5637,8 +5538,6 @@ void Heap::TearDown() {
map_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
cell_space_->MaximumCommittedMemory());
- PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
- property_cell_space_->MaximumCommittedMemory());
PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
lo_space_->MaximumCommittedMemory());
PrintF("\n\n");
@@ -5688,12 +5587,6 @@ void Heap::TearDown() {
cell_space_ = NULL;
}
- if (property_cell_space_ != NULL) {
- property_cell_space_->TearDown();
- delete property_cell_space_;
- property_cell_space_ = NULL;
- }
-
if (lo_space_ != NULL) {
lo_space_->TearDown();
delete lo_space_;
@@ -5768,6 +5661,19 @@ DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<HeapObject> obj) {
}
+void Heap::AddRetainedMap(Handle<Map> map) {
+ if (FLAG_retain_maps_for_n_gc == 0) return;
+ Handle<WeakCell> cell = Map::WeakCellForMap(map);
+ Handle<ArrayList> array(retained_maps(), isolate());
+ array = ArrayList::Add(
+ array, cell, handle(Smi::FromInt(FLAG_retain_maps_for_n_gc), isolate()),
+ ArrayList::kReloadLengthAfterAllocation);
+ if (*array != retained_maps()) {
+ set_retained_maps(*array);
+ }
+}
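// Editor's note (illustrative, not part of this patch): retained_maps is
// kept as flat (WeakCell, Smi counter) pairs, with the counter seeded from
// FLAG_retain_maps_for_n_gc:
//
//   retained_maps: [ cell0, Smi(n), cell1, Smi(n), ... ]
//
// Presumably the mark-compact collector decrements the counters and lets a
// map die once its counter reaches zero and nothing else retains it.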
+
+
void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
}
@@ -5807,8 +5713,6 @@ Space* AllSpaces::next() {
return heap_->map_space();
case CELL_SPACE:
return heap_->cell_space();
- case PROPERTY_CELL_SPACE:
- return heap_->property_cell_space();
case LO_SPACE:
return heap_->lo_space();
default:
@@ -5829,8 +5733,6 @@ PagedSpace* PagedSpaces::next() {
return heap_->map_space();
case CELL_SPACE:
return heap_->cell_space();
- case PROPERTY_CELL_SPACE:
- return heap_->property_cell_space();
default:
return NULL;
}
@@ -5917,10 +5819,6 @@ ObjectIterator* SpaceIterator::CreateIterator() {
case CELL_SPACE:
iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
break;
- case PROPERTY_CELL_SPACE:
- iterator_ =
- new HeapObjectIterator(heap_->property_cell_space(), size_func_);
- break;
case LO_SPACE:
iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
break;
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 21eb7e67aa..ee701b2d5a 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -38,7 +38,6 @@ namespace internal {
V(Oddball, true_value, TrueValue) \
V(Oddball, false_value, FalseValue) \
V(Oddball, uninitialized_value, UninitializedValue) \
- V(Oddball, exception, Exception) \
V(Map, cell_map, CellMap) \
V(Map, global_property_cell_map, GlobalPropertyCellMap) \
V(Map, shared_function_info_map, SharedFunctionInfoMap) \
@@ -53,17 +52,20 @@ namespace internal {
V(Map, fixed_double_array_map, FixedDoubleArrayMap) \
V(Map, constant_pool_array_map, ConstantPoolArrayMap) \
V(Map, weak_cell_map, WeakCellMap) \
- V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
- V(Map, hash_table_map, HashTableMap) \
- V(Map, ordered_hash_table_map, OrderedHashTableMap) \
+ V(Map, one_byte_string_map, OneByteStringMap) \
+ V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
+ V(Map, function_context_map, FunctionContextMap) \
V(FixedArray, empty_fixed_array, EmptyFixedArray) \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \
V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray) \
- V(Oddball, arguments_marker, ArgumentsMarker) \
/* The roots above this line should be boring from a GC point of view. */ \
/* This means they are never in new space and never on a page that is */ \
/* being compacted. */ \
+ V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel) \
+ V(Oddball, arguments_marker, ArgumentsMarker) \
+ V(Oddball, exception, Exception) \
+ V(Oddball, termination_exception, TerminationException) \
V(FixedArray, number_string_cache, NumberStringCache) \
V(Object, instanceof_cache_function, InstanceofCacheFunction) \
V(Object, instanceof_cache_map, InstanceofCacheMap) \
@@ -71,13 +73,13 @@ namespace internal {
V(FixedArray, single_character_string_cache, SingleCharacterStringCache) \
V(FixedArray, string_split_cache, StringSplitCache) \
V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \
- V(Oddball, termination_exception, TerminationException) \
V(Smi, hash_seed, HashSeed) \
+ V(Map, hash_table_map, HashTableMap) \
+ V(Map, ordered_hash_table_map, OrderedHashTableMap) \
V(Map, symbol_map, SymbolMap) \
V(Map, string_map, StringMap) \
- V(Map, one_byte_string_map, OneByteStringMap) \
- V(Map, cons_string_map, ConsStringMap) \
V(Map, cons_one_byte_string_map, ConsOneByteStringMap) \
+ V(Map, cons_string_map, ConsStringMap) \
V(Map, sliced_string_map, SlicedStringMap) \
V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap) \
V(Map, external_string_map, ExternalStringMap) \
@@ -89,7 +91,6 @@ namespace internal {
V(Map, short_external_string_with_one_byte_data_map, \
ShortExternalStringWithOneByteDataMap) \
V(Map, internalized_string_map, InternalizedStringMap) \
- V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap) \
V(Map, external_internalized_string_map, ExternalInternalizedStringMap) \
V(Map, external_internalized_string_with_one_byte_data_map, \
ExternalInternalizedStringWithOneByteDataMap) \
@@ -141,7 +142,6 @@ namespace internal {
V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array, \
EmptyFixedUint8ClampedArray) \
V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap) \
- V(Map, function_context_map, FunctionContextMap) \
V(Map, catch_context_map, CatchContextMap) \
V(Map, with_context_map, WithContextMap) \
V(Map, block_context_map, BlockContextMap) \
@@ -159,10 +159,11 @@ namespace internal {
V(Map, termination_exception_map, TerminationExceptionMap) \
V(Map, message_object_map, JSMessageObjectMap) \
V(Map, foreign_map, ForeignMap) \
+ V(Map, neander_map, NeanderMap) \
+ V(Map, external_map, ExternalMap) \
V(HeapNumber, nan_value, NanValue) \
V(HeapNumber, infinity_value, InfinityValue) \
V(HeapNumber, minus_zero_value, MinusZeroValue) \
- V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
@@ -172,9 +173,8 @@ namespace internal {
V(FixedArray, natives_source_cache, NativesSourceCache) \
V(Script, empty_script, EmptyScript) \
V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \
- V(Cell, undefined_cell, UndefineCell) \
+ V(Cell, undefined_cell, UndefinedCell) \
V(JSObject, observation_state, ObservationState) \
- V(Map, external_map, ExternalMap) \
V(Object, symbol_registry, SymbolRegistry) \
V(SeededNumberDictionary, empty_slow_element_dictionary, \
EmptySlowElementDictionary) \
@@ -183,6 +183,7 @@ namespace internal {
V(FixedArray, microtask_queue, MicrotaskQueue) \
V(FixedArray, keyed_load_dummy_vector, KeyedLoadDummyVector) \
V(FixedArray, detached_contexts, DetachedContexts) \
+ V(ArrayList, retained_maps, RetainedMaps) \
V(WeakHashTable, weak_object_to_code_table, WeakObjectToCodeTable)
// Entries in this list are limited to Smis and are not visited during GC.
@@ -195,6 +196,7 @@ namespace internal {
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
+
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
SMI_ROOT_LIST(V) \
@@ -607,6 +609,9 @@ class Heap {
// Returns the amount of memory currently committed for the heap.
intptr_t CommittedMemory();
+ // Returns the amount of memory currently committed for the old space.
+ intptr_t CommittedOldGenerationMemory();
+
// Returns the amount of executable memory currently committed for the heap.
intptr_t CommittedMemoryExecutable();
@@ -645,7 +650,6 @@ class Heap {
OldSpace* code_space() { return code_space_; }
MapSpace* map_space() { return map_space_; }
CellSpace* cell_space() { return cell_space_; }
- PropertyCellSpace* property_cell_space() { return property_cell_space_; }
LargeObjectSpace* lo_space() { return lo_space_; }
PagedSpace* paged_space(int idx) {
switch (idx) {
@@ -657,8 +661,6 @@ class Heap {
return map_space();
case CELL_SPACE:
return cell_space();
- case PROPERTY_CELL_SPACE:
- return property_cell_space();
case CODE_SPACE:
return code_space();
case NEW_SPACE:
@@ -694,6 +696,12 @@ class Heap {
return old_data_space_->allocation_limit_address();
}
+ // TODO(hpayer): There is still a mismatch between capacity and actual
+ // committed memory size.
+ bool CanExpandOldGeneration(int size) {
+ return (CommittedOldGenerationMemory() + size) < MaxOldGenerationSize();
+ }
+
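// Editor's note (hedged usage sketch, not from this patch): callers are
// expected to gate promotion or old-generation growth on this predicate,
// for example:
//   if (!CanExpandOldGeneration(object_size)) { /* trigger a full GC */ }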
// Returns a deep copy of the JavaScript object.
// Properties and elements are copied too.
// Optionally takes an AllocationSite to be appended in an AllocationMemento.
@@ -863,6 +871,13 @@ class Heap {
void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
Object* array_buffers_list() const { return array_buffers_list_; }
+ void set_new_array_buffer_views_list(Object* object) {
+ new_array_buffer_views_list_ = object;
+ }
+ Object* new_array_buffer_views_list() const {
+ return new_array_buffer_views_list_;
+ }
+
void set_allocation_sites_list(Object* object) {
allocation_sites_list_ = object;
}
@@ -898,7 +913,8 @@ class Heap {
// Iterate pointers to from semispace of new space found in memory interval
// from start to end.
- void IterateAndMarkPointersToFromSpace(Address start, Address end,
+ void IterateAndMarkPointersToFromSpace(bool record_slots, Address start,
+ Address end,
ObjectSlotCallback callback);
// Returns whether the object resides in new space.
@@ -981,10 +997,6 @@ class Heap {
void Print();
void PrintHandles();
- void OldPointerSpaceCheckStoreBuffer();
- void MapSpaceCheckStoreBuffer();
- void LargeObjectSpaceCheckStoreBuffer();
-
// Report heap statistics.
void ReportHeapStatistics(const char* title);
void ReportCodeStatistics(const char* title);
@@ -1090,7 +1102,13 @@ class Heap {
static const int kInitalOldGenerationLimitFactor = 2;
+#if V8_OS_ANDROID
+ // Don't apply the pointer multiplier on Android since it has no swap space
+ // and should instead adapt its heap size based on available physical memory.
+ static const int kPointerMultiplier = 1;
+#else
static const int kPointerMultiplier = i::kPointerSize / 4;
+#endif
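// Editor's note (worked example): with i::kPointerSize == 8 on a 64-bit
// desktop build, kPointerMultiplier == 2, so e.g.
// kMaxSemiSpaceSizeLowMemoryDevice below evaluates to 2 MB; on Android the
// multiplier stays 1 regardless of pointer width.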
// The new space size has to be a power of 2. Sizes are in MB.
static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
@@ -1308,6 +1326,8 @@ class Heap {
// Returns the current sweep generation.
int sweep_generation() { return sweep_generation_; }
+ bool concurrent_sweeping_enabled() { return concurrent_sweeping_enabled_; }
+
inline Isolate* isolate();
void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
@@ -1450,6 +1470,8 @@ class Heap {
DependentCode* LookupWeakObjectToCodeDependency(Handle<HeapObject> obj);
+ void AddRetainedMap(Handle<Map> map);
+
static void FatalProcessOutOfMemory(const char* location,
bool take_snapshot = false);
@@ -1465,6 +1487,18 @@ class Heap {
bool deserialization_complete() const { return deserialization_complete_; }
+ bool migration_failure() const { return migration_failure_; }
+ void set_migration_failure(bool migration_failure) {
+ migration_failure_ = migration_failure;
+ }
+
+ bool previous_migration_failure() const {
+ return previous_migration_failure_;
+ }
+ void set_previous_migration_failure(bool previous_migration_failure) {
+ previous_migration_failure_ = previous_migration_failure;
+ }
+
protected:
// Methods made available to tests.
@@ -1566,7 +1600,6 @@ class Heap {
OldSpace* code_space_;
MapSpace* map_space_;
CellSpace* cell_space_;
- PropertyCellSpace* property_cell_space_;
LargeObjectSpace* lo_space_;
HeapState gc_state_;
int gc_post_processing_depth_;
@@ -1602,6 +1635,8 @@ class Heap {
inline void set_##name(type* value) { \
/* The deserializer makes use of the fact that these common roots are */ \
/* never in new space and never on a page that is being compacted. */ \
+ DCHECK(!deserialization_complete() || \
+ RootCanBeWrittenAfterInitialization(k##camel_name##RootIndex)); \
DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
roots_[k##camel_name##RootIndex] = value; \
}
@@ -1621,8 +1656,8 @@ class Heap {
// generation and on every allocation in large object space.
intptr_t old_generation_allocation_limit_;
- // The allocation limit when there is > kMinIdleTimeToStartIncrementalMarking
- // idle time in the idle time handler.
+ // The allocation limit when there is more than 16.66 ms of idle time in the
+ // idle time handler.
intptr_t idle_old_generation_allocation_limit_;
// Indicates that an allocation has failed in the old generation since the
@@ -1634,11 +1669,16 @@ class Heap {
bool inline_allocation_disabled_;
// Weak list heads, threaded through the objects.
- // List heads are initilized lazily and contain the undefined_value at start.
+ // List heads are initialized lazily and contain the undefined_value at start.
Object* native_contexts_list_;
Object* array_buffers_list_;
Object* allocation_sites_list_;
+ // This is a global list of array buffer views in new space. When the views
+ // get promoted, they are removed from the list and added to the
+ // corresponding array buffer.
+ Object* new_array_buffer_views_list_;
+
// List of encountered weak collections (JSWeakMap and JSWeakSet) during
// marking. It is initialized during marking, destroyed after marking and
// contains Smi(0) while marking is not active.
@@ -1971,7 +2011,8 @@ class Heap {
void MarkCompactEpilogue();
void ProcessNativeContexts(WeakObjectRetainer* retainer);
- void ProcessArrayBuffers(WeakObjectRetainer* retainer);
+ void ProcessArrayBuffers(WeakObjectRetainer* retainer, bool stop_after_young);
+ void ProcessNewArrayBufferViews(WeakObjectRetainer* retainer);
void ProcessAllocationSites(WeakObjectRetainer* retainer);
// Deopts all code that contains allocation instruction which are tenured or
@@ -2131,6 +2172,15 @@ class Heap {
bool deserialization_complete_;
+ bool concurrent_sweeping_enabled_;
+
+ // A migration failure indicates that a semi-space copy of an object during
+ // a scavenge failed and the object got promoted instead.
+ bool migration_failure_;
+
+ // A migration failure happened in the previous scavenge.
+ bool previous_migration_failure_;
+
friend class AlwaysAllocateScope;
friend class Deserializer;
friend class Factory;
@@ -2177,8 +2227,6 @@ class HeapStats {
int* size_per_type; // 22
int* os_error; // 23
int* end_marker; // 24
- intptr_t* property_cell_space_size; // 25
- intptr_t* property_cell_space_capacity; // 26
};
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index dfd3ed2766..83ebbda8cf 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -30,6 +30,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
unscanned_bytes_of_large_object_(0),
was_activated_(false),
weak_closure_was_overapproximated_(false),
+ weak_closure_approximation_rounds_(0),
request_type_(COMPLETE_MARKING) {}
@@ -251,13 +252,7 @@ class IncrementalMarkingMarkingVisitor
// Marks the object grey and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, Object* obj)) {
- HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
- if (mark_bit.data_only()) {
- MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
- } else if (Marking::IsWhite(mark_bit)) {
- heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
- }
+ IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
}
// Marks the object black without pushing it on the marking stack.
@@ -280,7 +275,7 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
public:
explicit IncrementalMarkingRootMarkingVisitor(
IncrementalMarking* incremental_marking)
- : incremental_marking_(incremental_marking) {}
+ : heap_(incremental_marking->heap()) {}
void VisitPointer(Object** p) { MarkObjectByPointer(p); }
@@ -293,18 +288,10 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
Object* obj = *p;
if (!obj->IsHeapObject()) return;
- HeapObject* heap_object = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
- if (mark_bit.data_only()) {
- MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
- } else {
- if (Marking::IsWhite(mark_bit)) {
- incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
- }
- }
+ IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
}
- IncrementalMarking* incremental_marking_;
+ Heap* heap_;
};
@@ -326,7 +313,6 @@ void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
} else if (chunk->owner()->identity() == CELL_SPACE ||
- chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
chunk->scan_on_scavenge()) {
chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
@@ -373,7 +359,6 @@ void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
- DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
@@ -408,7 +393,6 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
ActivateIncrementalWriteBarrier(heap_->old_data_space());
ActivateIncrementalWriteBarrier(heap_->cell_space());
- ActivateIncrementalWriteBarrier(heap_->property_cell_space());
ActivateIncrementalWriteBarrier(heap_->map_space());
ActivateIncrementalWriteBarrier(heap_->code_space());
ActivateIncrementalWriteBarrier(heap_->new_space());
@@ -561,6 +545,35 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
}
+void IncrementalMarking::MarkObjectGroups() {
+ DCHECK(FLAG_overapproximate_weak_closure);
+ DCHECK(!weak_closure_was_overapproximated_);
+
+ int old_marking_deque_top =
+ heap_->mark_compact_collector()->marking_deque()->top();
+
+ heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
+
+ IncrementalMarkingRootMarkingVisitor visitor(this);
+ heap_->isolate()->global_handles()->IterateObjectGroups(
+ &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
+
+ int marking_progress =
+ abs(old_marking_deque_top -
+ heap_->mark_compact_collector()->marking_deque()->top());
+
+ ++weak_closure_approximation_rounds_;
+ if ((weak_closure_approximation_rounds_ >=
+ FLAG_max_object_groups_marking_rounds) ||
+ (marking_progress < FLAG_min_progress_during_object_groups_marking)) {
+ weak_closure_was_overapproximated_ = true;
+ }
+
+ heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
+ heap_->isolate()->global_handles()->RemoveObjectGroups();
+}
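// Editor's note (illustrative): overapproximation rounds terminate either
// after FLAG_max_object_groups_marking_rounds rounds or once a round moves
// the marking deque top by fewer than
// FLAG_min_progress_during_object_groups_marking entries; deque-top movement
// is used above as a cheap proxy for newly discovered objects.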
+
+
void IncrementalMarking::PrepareForScavenge() {
if (!IsMarking()) return;
NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
@@ -641,6 +654,16 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
}
+void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ if (mark_bit.data_only()) {
+ MarkBlackOrKeepGrey(obj, mark_bit, obj->Size());
+ } else if (Marking::IsWhite(mark_bit)) {
+ heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
+ }
+}
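// Editor's note (illustrative): data-only objects carry no outgoing
// pointers, so MarkObject can blacken them immediately; pointer-bearing
// objects go white -> grey and are pushed for a later rescan by
// ProcessMarkingDeque, preserving the tri-color invariant.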
+
+
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
intptr_t bytes_processed = 0;
Map* filler_map = heap_->one_pointer_filler_map();
@@ -773,15 +796,16 @@ void IncrementalMarking::Finalize() {
}
-void IncrementalMarking::OverApproximateWeakClosure() {
+void IncrementalMarking::OverApproximateWeakClosure(CompletionAction action) {
DCHECK(FLAG_overapproximate_weak_closure);
DCHECK(!weak_closure_was_overapproximated_);
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
}
- set_should_hurry(true);
request_type_ = OVERAPPROXIMATION;
- heap_->isolate()->stack_guard()->RequestGC();
+ if (action == GC_VIA_STACK_GUARD) {
+ heap_->isolate()->stack_guard()->RequestGC();
+ }
}
@@ -796,8 +820,8 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Complete (normal).\n");
}
+ request_type_ = COMPLETE_MARKING;
if (action == GC_VIA_STACK_GUARD) {
- request_type_ = COMPLETE_MARKING;
heap_->isolate()->stack_guard()->RequestGC();
}
}
@@ -806,6 +830,7 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
void IncrementalMarking::Epilogue() {
was_activated_ = false;
weak_closure_was_overapproximated_ = false;
+ weak_closure_approximation_rounds_ = 0;
}
@@ -937,7 +962,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
if (state_ == SWEEPING) {
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
(heap_->mark_compact_collector()->IsSweepingCompleted() ||
- !FLAG_concurrent_sweeping)) {
+ !heap_->concurrent_sweeping_enabled())) {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
@@ -950,9 +975,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
if (completion == FORCE_COMPLETION ||
IsIdleMarkingDelayCounterLimitReached()) {
if (FLAG_overapproximate_weak_closure &&
- !weak_closure_was_overapproximated_ &&
- action == GC_VIA_STACK_GUARD) {
- OverApproximateWeakClosure();
+ !weak_closure_was_overapproximated_) {
+ OverApproximateWeakClosure(action);
} else {
MarkingComplete(action);
}
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index d6dfe17c7f..7d41cfef41 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -41,7 +41,8 @@ class IncrementalMarking {
bool weak_closure_was_overapproximated() const {
return weak_closure_was_overapproximated_;
}
- void set_weak_closure_was_overapproximated(bool val) {
+
+ void SetWeakClosureWasOverApproximatedForTesting(bool val) {
weak_closure_was_overapproximated_ = val;
}
@@ -53,6 +54,11 @@ class IncrementalMarking {
inline bool IsComplete() { return state() == COMPLETE; }
+ inline bool IsReadyToOverApproximateWeakClosure() const {
+ return request_type_ == OVERAPPROXIMATION &&
+ !weak_closure_was_overapproximated_;
+ }
+
GCRequestType request_type() const { return request_type_; }
bool WorthActivating();
@@ -67,6 +73,8 @@ class IncrementalMarking {
void Stop();
+ void MarkObjectGroups();
+
void PrepareForScavenge();
void UpdateMarkingDequeAfterScavenge();
@@ -77,7 +85,7 @@ class IncrementalMarking {
void Abort();
- void OverApproximateWeakClosure();
+ void OverApproximateWeakClosure(CompletionAction action);
void MarkingComplete(CompletionAction action);
@@ -189,6 +197,10 @@ class IncrementalMarking {
bool IsIdleMarkingDelayCounterLimitReached();
+ INLINE(static void MarkObject(Heap* heap, HeapObject* object));
+
+ Heap* heap() const { return heap_; }
+
private:
int64_t SpaceLeftInOldSpace();
@@ -243,6 +255,8 @@ class IncrementalMarking {
bool weak_closure_was_overapproximated_;
+ int weak_closure_approximation_rounds_;
+
GCRequestType request_type_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index b525bf6ac2..4f7f61e1ee 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -99,6 +99,7 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
for (Address current = bottom; current < top; current += kPointerSize) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
@@ -138,7 +139,6 @@ static void VerifyMarking(Heap* heap) {
VerifyMarking(heap->old_data_space());
VerifyMarking(heap->code_space());
VerifyMarking(heap->cell_space());
- VerifyMarking(heap->property_cell_space());
VerifyMarking(heap->map_space());
VerifyMarking(heap->new_space());
@@ -219,7 +219,6 @@ static void VerifyEvacuation(Heap* heap) {
VerifyEvacuation(heap, heap->old_data_space());
VerifyEvacuation(heap, heap->code_space());
VerifyEvacuation(heap, heap->cell_space());
- VerifyEvacuation(heap, heap->property_cell_space());
VerifyEvacuation(heap, heap->map_space());
VerifyEvacuation(heap->new_space());
@@ -262,11 +261,6 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
if (!compacting_) {
DCHECK(evacuation_candidates_.length() == 0);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- // If GDBJIT interface is active disable compaction.
- if (FLAG_gdbjit) return false;
-#endif
-
CollectEvacuationCandidates(heap()->old_pointer_space());
CollectEvacuationCandidates(heap()->old_data_space());
@@ -280,7 +274,6 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->map_space());
TraceFragmentation(heap()->cell_space());
- TraceFragmentation(heap()->property_cell_space());
}
heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
@@ -294,6 +287,60 @@ bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
}
+void MarkCompactCollector::ClearInvalidSlotsBufferEntries(PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
+ }
+}
+
+
+void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
+ heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+
+ ClearInvalidSlotsBufferEntries(heap_->old_pointer_space());
+ ClearInvalidSlotsBufferEntries(heap_->old_data_space());
+ ClearInvalidSlotsBufferEntries(heap_->code_space());
+ ClearInvalidSlotsBufferEntries(heap_->cell_space());
+ ClearInvalidSlotsBufferEntries(heap_->map_space());
+
+ LargeObjectIterator it(heap_->lo_space());
+ for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ SlotsBuffer::RemoveInvalidSlots(heap_, chunk->slots_buffer());
+ }
+}
+
+
+#ifdef VERIFY_HEAP
+static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ SlotsBuffer::VerifySlots(heap, p->slots_buffer());
+ }
+}
+
+
+static void VerifyValidStoreAndSlotsBufferEntries(Heap* heap) {
+ heap->store_buffer()->VerifyValidStoreBufferEntries();
+
+ VerifyValidSlotsBufferEntries(heap, heap->old_pointer_space());
+ VerifyValidSlotsBufferEntries(heap, heap->old_data_space());
+ VerifyValidSlotsBufferEntries(heap, heap->code_space());
+ VerifyValidSlotsBufferEntries(heap, heap->cell_space());
+ VerifyValidSlotsBufferEntries(heap, heap->map_space());
+
+ LargeObjectIterator it(heap->lo_space());
+ for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ SlotsBuffer::VerifySlots(heap, chunk->slots_buffer());
+ }
+}
+#endif
+
+
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
@@ -319,6 +366,14 @@ void MarkCompactCollector::CollectGarbage() {
}
#endif
+ ClearInvalidStoreAndSlotsBufferEntries();
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ VerifyValidStoreAndSlotsBufferEntries(heap_);
+ }
+#endif
+
SweepSpaces();
#ifdef VERIFY_HEAP
@@ -367,7 +422,6 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
VerifyMarkbitsAreClean(heap_->old_data_space());
VerifyMarkbitsAreClean(heap_->code_space());
VerifyMarkbitsAreClean(heap_->cell_space());
- VerifyMarkbitsAreClean(heap_->property_cell_space());
VerifyMarkbitsAreClean(heap_->map_space());
VerifyMarkbitsAreClean(heap_->new_space());
@@ -426,7 +480,6 @@ void MarkCompactCollector::ClearMarkbits() {
ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
ClearMarkbitsInPagedSpace(heap_->old_data_space());
ClearMarkbitsInPagedSpace(heap_->cell_space());
- ClearMarkbitsInPagedSpace(heap_->property_cell_space());
ClearMarkbitsInNewSpace(heap_->new_space());
LargeObjectIterator it(heap_->lo_space());
@@ -477,12 +530,12 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
// If sweeping is not completed or not running at all, we try to complete it
// here.
- if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+ if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) {
SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
}
// Wait twice for both jobs.
- if (FLAG_concurrent_sweeping) {
+ if (heap()->concurrent_sweeping_enabled()) {
pending_sweeper_jobs_semaphore_.Wait();
pending_sweeper_jobs_semaphore_.Wait();
}
@@ -584,8 +637,6 @@ const char* AllocationSpaceName(AllocationSpace space) {
return "MAP_SPACE";
case CELL_SPACE:
return "CELL_SPACE";
- case PROPERTY_CELL_SPACE:
- return "PROPERTY_CELL_SPACE";
case LO_SPACE:
return "LO_SPACE";
default:
@@ -602,11 +653,11 @@ const char* AllocationSpaceName(AllocationSpace space) {
static int FreeListFragmentation(PagedSpace* space, Page* p) {
// If page was not swept then there are no free list items on it.
if (!p->WasSwept()) {
- if (FLAG_trace_fragmentation) {
+ if (FLAG_trace_fragmentation_verbose) {
PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
AllocationSpaceName(space->identity()), p->LiveBytes());
}
- return 0;
+ return FLAG_always_compact ? 1 : 0;
}
PagedSpace::SizeStats sizes;
@@ -623,7 +674,7 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
ratio_threshold = 15;
}
- if (FLAG_trace_fragmentation) {
+ if (FLAG_trace_fragmentation_verbose) {
PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
static_cast<int>(sizes.small_size_),
@@ -699,6 +750,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
max_evacuation_candidates *= 2;
}
+ if (FLAG_always_compact) {
+ max_evacuation_candidates = kMaxMaxEvacuationCandidates;
+ }
+
if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
PrintF(
"Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
@@ -712,24 +767,42 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
Candidate candidates[kMaxMaxEvacuationCandidates];
+ if (FLAG_trace_fragmentation &&
+ max_evacuation_candidates >= kMaxMaxEvacuationCandidates) {
+ PrintF("Hit max page compaction limit of %d pages\n",
+ kMaxMaxEvacuationCandidates);
+ }
max_evacuation_candidates =
Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
int count = 0;
int fragmentation = 0;
+ int page_number = 0;
Candidate* least = NULL;
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
if (p->NeverEvacuate()) continue;
- p->ClearEvacuationCandidate();
+
+  // Invariant: Evacuation candidates are only created when marking starts.
+  // At the end of a GC all evacuation candidates are cleared and their
+  // slot buffers are released.
+ CHECK(!p->IsEvacuationCandidate());
+ CHECK(p->slots_buffer() == NULL);
if (FLAG_stress_compaction) {
- unsigned int counter = space->heap()->ms_count();
- uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
- if ((counter & 1) == (page_number & 1)) fragmentation = 1;
- } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
+ if (FLAG_manual_evacuation_candidates_selection) {
+ if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
+ p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ fragmentation = 1;
+ }
+ } else {
+ unsigned int counter = space->heap()->ms_count();
+ if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+ page_number++;
+ }
+ } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) {
// Don't try to release too many pages.
if (estimated_release >= over_reserved) {
continue;
@@ -754,7 +827,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
fragmentation = 0;
}
- if (FLAG_trace_fragmentation) {
+ if (FLAG_trace_fragmentation_verbose) {
PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
AllocationSpaceName(space->identity()),
static_cast<int>(free_bytes),
@@ -1470,8 +1543,9 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker<
heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
fixed_array_size);
}
- if (map_obj->HasTransitionArray()) {
- int fixed_array_size = map_obj->transitions()->Size();
+ if (TransitionArray::IsFullTransitionArray(map_obj->raw_transitions())) {
+ int fixed_array_size =
+ TransitionArray::cast(map_obj->raw_transitions())->Size();
heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
fixed_array_size);
}
@@ -1966,7 +2040,8 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
}
-void MarkCompactCollector::MarkImplicitRefGroups() {
+void MarkCompactCollector::MarkImplicitRefGroups(
+ MarkObjectFunction mark_object) {
List<ImplicitRefGroup*>* ref_groups =
isolate()->global_handles()->implicit_ref_groups();
@@ -1984,9 +2059,7 @@ void MarkCompactCollector::MarkImplicitRefGroups() {
// A parent object is marked, so mark all child heap objects.
for (size_t j = 0; j < entry->length; ++j) {
if ((*children[j])->IsHeapObject()) {
- HeapObject* child = HeapObject::cast(*children[j]);
- MarkBit mark = Marking::MarkBitFrom(child);
- MarkObject(child, mark);
+ mark_object(heap(), HeapObject::cast(*children[j]));
}
}
@@ -2050,10 +2123,6 @@ void MarkCompactCollector::RefillMarkingDeque() {
DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
- heap()->property_cell_space());
- if (marking_deque_.IsFull()) return;
-
LargeObjectIterator lo_it(heap()->lo_space());
DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
if (marking_deque_.IsFull()) return;
@@ -2085,7 +2154,7 @@ void MarkCompactCollector::ProcessEphemeralMarking(
if (!only_process_harmony_weak_collections) {
isolate()->global_handles()->IterateObjectGroups(
visitor, &IsUnmarkedHeapObjectWithHeap);
- MarkImplicitRefGroups();
+ MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
}
ProcessWeakCollections();
work_to_do = !marking_deque_.IsEmpty();
@@ -2112,6 +2181,71 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
+void MarkCompactCollector::RetainMaps() {
+ if (reduce_memory_footprint_ || abort_incremental_marking_ ||
+ FLAG_retain_maps_for_n_gc == 0) {
+    // Do not retain dead maps if the flag disables it, or if there is
+    // - memory pressure (reduce_memory_footprint_), or
+    // - a GC requested by tests or dev-tools (abort_incremental_marking_).
+ return;
+ }
+
+ ArrayList* retained_maps = heap()->retained_maps();
+ int length = retained_maps->Length();
+ int new_length = 0;
+ for (int i = 0; i < length; i += 2) {
+ DCHECK(retained_maps->Get(i)->IsWeakCell());
+ WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
+ if (cell->cleared()) continue;
+ int age = Smi::cast(retained_maps->Get(i + 1))->value();
+ int new_age;
+ Map* map = Map::cast(cell->value());
+ MarkBit map_mark = Marking::MarkBitFrom(map);
+ if (!map_mark.Get()) {
+ if (age == 0) {
+ // The map has aged. Do not retain this map.
+ continue;
+ }
+ Object* constructor = map->GetConstructor();
+ if (!constructor->IsHeapObject() ||
+ !Marking::MarkBitFrom(HeapObject::cast(constructor)).Get()) {
+ // The constructor is dead, no new objects with this map can
+ // be created. Do not retain this map.
+ continue;
+ }
+ Object* prototype = map->prototype();
+ if (prototype->IsHeapObject() &&
+ !Marking::MarkBitFrom(HeapObject::cast(prototype)).Get()) {
+ // The prototype is not marked, age the map.
+ new_age = age - 1;
+ } else {
+        // The prototype and the constructor are marked, so this map keeps
+        // only the transition tree alive, not JSObjects. Do not age the map.
+ new_age = age;
+ }
+ MarkObject(map, map_mark);
+ } else {
+ new_age = FLAG_retain_maps_for_n_gc;
+ }
+ if (i != new_length) {
+ retained_maps->Set(new_length, cell);
+ Object** slot = retained_maps->Slot(new_length);
+ RecordSlot(slot, slot, cell);
+ retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
+ } else if (new_age != age) {
+ retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
+ }
+ new_length += 2;
+ }
+ Object* undefined = heap()->undefined_value();
+ for (int i = new_length; i < length; i++) {
+ retained_maps->Clear(i, undefined);
+ }
+ if (new_length != length) retained_maps->SetLength(new_length);
+ ProcessMarkingDeque();
+}
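
The aging scheme above is compact enough to model outside V8. A minimal standalone sketch (the struct and function names are illustrative, not V8 API): an entry's counter is reset to FLAG_retain_maps_for_n_gc while its map is marked, decremented while the map is unmarked but its prototype is dead, and the entry is dropped once the counter reaches zero or its constructor dies.

    #include <vector>

    // Illustrative stand-ins for the per-entry state RetainMaps() consults.
    struct RetainedMap {
      bool marked;            // the map itself was reached by marking
      bool constructor_live;  // its constructor is still marked
      bool prototype_live;    // its prototype is still marked
      int age;                // countdown toward dropping the map
    };

    // Returns the surviving entries, mirroring the in-place compaction above.
    std::vector<RetainedMap> AgeRetainedMaps(std::vector<RetainedMap> in,
                                             int retain_for_n_gc) {
      std::vector<RetainedMap> out;
      for (RetainedMap e : in) {
        if (!e.marked) {
          if (e.age == 0) continue;           // aged out: drop the map
          if (!e.constructor_live) continue;  // no new instances: drop the map
          if (!e.prototype_live) e.age--;     // age only while prototype is dead
          // (the real collector marks the retained map at this point)
        } else {
          e.age = retain_for_n_gc;            // live map: reset the countdown
        }
        out.push_back(e);
      }
      return out;
    }
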
+
+
void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
if (marking_deque_memory_ == NULL) {
marking_deque_memory_ = new base::VirtualMemory(4 * MB);
@@ -2150,21 +2284,6 @@ void MarkCompactCollector::UncommitMarkingDeque() {
}
-void MarkCompactCollector::OverApproximateWeakClosure() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_INCREMENTAL_WEAKCLOSURE);
-
- RootMarkingVisitor root_visitor(heap());
- isolate()->global_handles()->IterateObjectGroups(
- &root_visitor, &IsUnmarkedHeapObjectWithHeap);
- MarkImplicitRefGroups();
-
- // Remove object groups after marking phase.
- heap()->isolate()->global_handles()->RemoveObjectGroups();
- heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
-}
-
-
void MarkCompactCollector::MarkLiveObjects() {
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
double start_time = 0.0;
@@ -2209,17 +2328,6 @@ void MarkCompactCollector::MarkLiveObjects() {
}
}
}
- {
- HeapObjectIterator js_global_property_cell_iterator(
- heap()->property_cell_space());
- HeapObject* cell;
- while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
- DCHECK(cell->IsPropertyCell());
- if (IsMarked(cell)) {
- MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
- }
- }
- }
}
RootMarkingVisitor root_visitor(heap());
@@ -2227,6 +2335,11 @@ void MarkCompactCollector::MarkLiveObjects() {
ProcessTopOptimizedFrame(&root_visitor);
+ // Retaining dying maps should happen before or during ephemeral marking
+ // because a map could keep the key of an ephemeron alive. Note that map
+ // aging is imprecise: maps that are kept alive only by ephemerons will age.
+ RetainMaps();
+
{
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCLOSURE);
@@ -2332,6 +2445,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
if (!table->IsKey(key)) continue;
uint32_t value_index = table->EntryToValueIndex(i);
Object* value = table->get(value_index);
+ DCHECK(key->IsWeakCell());
if (WeakCell::cast(key)->cleared()) {
have_code_to_deoptimize_ |=
DependentCode::cast(value)->MarkCodeForDeoptimization(
@@ -2345,10 +2459,12 @@ void MarkCompactCollector::ClearNonLiveReferences() {
void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
- int number_of_transitions = map->NumberOfProtoTransitions();
- FixedArray* prototype_transitions = map->GetPrototypeTransitions();
+ FixedArray* prototype_transitions =
+ TransitionArray::GetPrototypeTransitions(map);
+ int number_of_transitions =
+ TransitionArray::NumberOfPrototypeTransitions(prototype_transitions);
- const int header = Map::kProtoTransitionHeaderSize;
+ const int header = TransitionArray::kProtoTransitionHeaderSize;
int new_number_of_transitions = 0;
for (int i = 0; i < number_of_transitions; i++) {
Object* cached_map = prototype_transitions->get(header + i);
@@ -2362,7 +2478,8 @@ void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
}
if (new_number_of_transitions != number_of_transitions) {
- map->SetNumberOfProtoTransitions(new_number_of_transitions);
+ TransitionArray::SetNumberOfPrototypeTransitions(prototype_transitions,
+ new_number_of_transitions);
}
// Fill slots that became free with undefined value.
@@ -2383,7 +2500,7 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
bool current_is_alive = map_mark.Get();
bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
if (!current_is_alive && parent_is_alive) {
- ClearMapTransitions(parent);
+ ClearMapTransitions(parent, map);
}
}
@@ -2397,28 +2514,43 @@ bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
}
-void MarkCompactCollector::ClearMapTransitions(Map* map) {
- // If there are no transitions to be cleared, return.
- // TODO(verwaest) Should be an assert, otherwise back pointers are not
- // properly cleared.
- if (!map->HasTransitionArray()) return;
+void MarkCompactCollector::ClearMapTransitions(Map* map, Map* dead_transition) {
+ Object* transitions = map->raw_transitions();
+ int num_transitions = TransitionArray::NumberOfTransitions(transitions);
- TransitionArray* t = map->transitions();
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ DescriptorArray* descriptors = map->instance_descriptors();
+
+ // A previously existing simple transition (stored in a WeakCell) may have
+ // been cleared. Clear the useless cell pointer, and take ownership
+ // of the descriptor array.
+ if (transitions->IsWeakCell() && WeakCell::cast(transitions)->cleared()) {
+ map->set_raw_transitions(Smi::FromInt(0));
+ }
+ if (num_transitions == 0 &&
+ descriptors == dead_transition->instance_descriptors() &&
+ number_of_own_descriptors > 0) {
+ TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
+ DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ map->set_owns_descriptors(true);
+ return;
+ }
int transition_index = 0;
- DescriptorArray* descriptors = map->instance_descriptors();
bool descriptors_owner_died = false;
// Compact all live descriptors to the left.
- for (int i = 0; i < t->number_of_transitions(); ++i) {
- Map* target = t->GetTarget(i);
+ for (int i = 0; i < num_transitions; ++i) {
+ Map* target = TransitionArray::GetTarget(transitions, i);
if (ClearMapBackPointer(target)) {
if (target->instance_descriptors() == descriptors) {
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
+ DCHECK(TransitionArray::IsFullTransitionArray(transitions));
+ TransitionArray* t = TransitionArray::cast(transitions);
Name* key = t->GetKey(i);
t->SetKey(transition_index, key);
Object** key_slot = t->GetKeySlot(transition_index);
@@ -2433,9 +2565,7 @@ void MarkCompactCollector::ClearMapTransitions(Map* map) {
// If there are no transitions to be cleared, return.
// TODO(verwaest) Should be an assert, otherwise back pointers are not
// properly cleared.
- if (transition_index == t->number_of_transitions()) return;
-
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (transition_index == num_transitions) return;
if (descriptors_owner_died) {
if (number_of_own_descriptors > 0) {
@@ -2451,14 +2581,17 @@ void MarkCompactCollector::ClearMapTransitions(Map* map) {
// such that number_of_transitions() == 0. If this assumption changes,
// TransitionArray::Insert() will need to deal with the case that a transition
// array disappeared during GC.
- int trim = t->number_of_transitions_storage() - transition_index;
+ int trim = TransitionArray::Capacity(transitions) - transition_index;
if (trim > 0) {
+ // Non-full-TransitionArray cases can never reach this point.
+ DCHECK(TransitionArray::IsFullTransitionArray(transitions));
+ TransitionArray* t = TransitionArray::cast(transitions);
heap_->RightTrimFixedArray<Heap::FROM_GC>(
- t, t->IsSimpleTransition() ? trim
- : trim * TransitionArray::kTransitionSize);
+ t, trim * TransitionArray::kTransitionSize);
t->SetNumberOfTransitions(transition_index);
+ // The map still has a full transition array.
+ DCHECK(TransitionArray::IsFullTransitionArray(map->raw_transitions()));
}
- DCHECK(map->HasTransitionArray());
}
@@ -2475,6 +2608,13 @@ void MarkCompactCollector::TrimDescriptorArray(Map* map,
if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
descriptors->Sort();
+
+ if (FLAG_unbox_double_fields) {
+ LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
+ number_of_own_descriptors);
+ SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
+ }
}
@@ -2750,6 +2890,8 @@ class PointersUpdatingVisitor : public ObjectVisitor {
// Avoid unnecessary changes that might unnecessarily flush the instruction
// cache.
if (target != old_target) {
+ // TODO(jochen): Remove again after fixing http://crbug.com/452095
+ CHECK(target->IsHeapObject() == old_target->IsHeapObject());
rinfo->set_target_object(target);
}
}
@@ -2760,6 +2902,8 @@ class PointersUpdatingVisitor : public ObjectVisitor {
Object* old_target = target;
VisitPointer(&target);
if (target != old_target) {
+ // TODO(jochen): Remove again after fixing http://crbug.com/452095
+ CHECK(target->IsHeapObject() == old_target->IsHeapObject());
rinfo->set_target_address(Code::cast(target)->instruction_start());
}
}
@@ -2770,6 +2914,8 @@ class PointersUpdatingVisitor : public ObjectVisitor {
DCHECK(stub != NULL);
VisitPointer(&stub);
if (stub != rinfo->code_age_stub()) {
+ // TODO(jochen): Remove again after fixing http://crbug.com/452095
+ CHECK(stub->IsHeapObject() == rinfo->code_age_stub()->IsHeapObject());
rinfo->set_code_age_stub(Code::cast(stub));
}
}
@@ -2781,6 +2927,9 @@ class PointersUpdatingVisitor : public ObjectVisitor {
rinfo->IsPatchedDebugBreakSlotSequence()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
VisitPointer(&target);
+ // TODO(jochen): Remove again after fixing http://crbug.com/452095
+ CHECK(target->IsCode() &&
+ HAS_SMI_TAG(Code::cast(target)->instruction_start()));
rinfo->set_call_address(Code::cast(target)->instruction_start());
}
@@ -2794,12 +2943,14 @@ class PointersUpdatingVisitor : public ObjectVisitor {
// TODO(ishell): remove, once crbug/454297 is caught.
#if V8_TARGET_ARCH_64_BIT
+#ifndef V8_OS_AIX // no point checking on AIX, as the full 64-bit range is supported
const uintptr_t kBoundary = V8_UINT64_C(1) << 48;
STATIC_ASSERT(kBoundary > 0);
if (reinterpret_cast<uintptr_t>(heap_obj->address()) >= kBoundary) {
CheckLayoutDescriptorAndDie(heap, slot);
}
#endif
+#endif
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
DCHECK(heap->InFromSpace(heap_obj) ||
@@ -2852,11 +3003,9 @@ void PointersUpdatingVisitor::CheckLayoutDescriptorAndDie(Heap* heap,
space_owner_id = 6;
} else if (heap->cell_space()->ContainsSafe(slot_address)) {
space_owner_id = 7;
- } else if (heap->property_cell_space()->ContainsSafe(slot_address)) {
- space_owner_id = 8;
} else {
// Lo space or other.
- space_owner_id = 9;
+ space_owner_id = 8;
}
data[index++] = space_owner_id;
data[index++] = 0x20aaaaaaaaUL;
@@ -2918,19 +3067,18 @@ void PointersUpdatingVisitor::CheckLayoutDescriptorAndDie(Heap* heap,
static void UpdatePointer(HeapObject** address, HeapObject* object) {
- Address new_addr = Memory::Address_at(object->address());
-
- // The new space sweep will overwrite the map word of dead objects
- // with NULL. In this case we do not need to transfer this entry to
- // the store buffer which we are rebuilding.
- // We perform the pointer update with a no barrier compare-and-swap. The
- // compare and swap may fail in the case where the pointer update tries to
- // update garbage memory which was concurrently accessed by the sweeper.
- if (new_addr != NULL) {
- base::NoBarrier_CompareAndSwap(
- reinterpret_cast<base::AtomicWord*>(address),
- reinterpret_cast<base::AtomicWord>(object),
- reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
+ MapWord map_word = object->map_word();
+ // The store buffer can still contain stale pointers in dead large objects.
+ // Ignore these pointers here.
+ DCHECK(map_word.IsForwardingAddress() ||
+ object->GetHeap()->lo_space()->FindPage(
+ reinterpret_cast<Address>(address)) != NULL);
+ if (map_word.IsForwardingAddress()) {
+ // TODO(jochen): Remove again after fixing http://crbug.com/452095
+ CHECK((*address)->IsHeapObject() ==
+ map_word.ToForwardingAddress()->IsHeapObject());
+ // Update the corresponding slot.
+ *address = map_word.ToForwardingAddress();
}
}
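
For readers unfamiliar with the map-word trick used here: during evacuation V8 overwrites a moved object's map word with a forwarding pointer, and the updater above simply chases it. In miniature, with toy types rather than V8's tagged words:

    // Toy model of forwarding-pointer chasing (illustrative, not V8 API).
    struct Obj;
    struct MapWord {
      Obj* forwarding = nullptr;  // set once the object has been evacuated
      bool IsForwardingAddress() const { return forwarding != nullptr; }
    };
    struct Obj { MapWord map_word; };

    // Mirrors UpdatePointer(): rewrite the slot only if the object moved;
    // stale entries for dead large objects are deliberately left alone.
    void UpdateSlot(Obj** slot) {
      MapWord mw = (*slot)->map_word;
      if (mw.IsForwardingAddress()) *slot = mw.forwarding;
    }
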
@@ -2967,6 +3115,162 @@ bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
}
+bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
+ HeapObject** out_object) {
+ // This function does not support large objects right now.
+ Space* owner = p->owner();
+ if (owner == heap_->lo_space() || owner == NULL) {
+ *out_object = NULL;
+ return true;
+ }
+
+ uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
+ unsigned int start_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType index_in_cell = 1U
+ << (mark_bit_index & Bitmap::kBitIndexMask);
+ MarkBit::CellType* cells = p->markbits()->cells();
+ Address cell_base = p->area_start();
+ unsigned int cell_base_start_index = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(cell_base)));
+
+ // Check if the slot points to the start of an object. This can happen e.g.
+  // when we left-trim a fixed array. Such slots are invalid and we can remove
+ // them.
+ if ((cells[start_index] & index_in_cell) != 0) {
+ return false;
+ }
+
+ // Check if the object is in the current cell.
+ MarkBit::CellType slot_mask;
+ if ((cells[start_index] == 0) ||
+ (base::bits::CountTrailingZeros32(cells[start_index]) >
+ base::bits::CountTrailingZeros32(cells[start_index] | index_in_cell))) {
+ // If we are already in the first cell, there is no live object.
+ if (start_index == cell_base_start_index) return false;
+
+    // If not, search the preceding cells for one with a mark bit set.
+ do {
+ start_index--;
+ } while (start_index > cell_base_start_index && cells[start_index] == 0);
+
+ // The slot must be in a dead object if there are no preceding cells that
+ // have mark bits set.
+ if (cells[start_index] == 0) {
+ return false;
+ }
+
+ // The object is in a preceding cell. Set the mask to find any object.
+ slot_mask = 0xffffffff;
+ } else {
+    // The object start is before the slot index. Hence, in this case the
+    // slot index cannot be at the beginning of the cell.
+ CHECK(index_in_cell > 1);
+ // We are interested in object mark bits right before the slot.
+ slot_mask = index_in_cell - 1;
+ }
+
+ MarkBit::CellType current_cell = cells[start_index];
+ CHECK(current_cell != 0);
+
+ // Find the last live object in the cell.
+ unsigned int leading_zeros =
+ base::bits::CountLeadingZeros32(current_cell & slot_mask);
+ CHECK(leading_zeros != 32);
+ unsigned int offset = Bitmap::kBitIndexMask - leading_zeros;
+
+ cell_base += (start_index - cell_base_start_index) * 32 * kPointerSize;
+ Address address = cell_base + offset * kPointerSize;
+ HeapObject* object = HeapObject::FromAddress(address);
+ CHECK(object->address() < reinterpret_cast<Address>(slot));
+ if (object->address() <= slot &&
+ (object->address() + object->Size()) > slot) {
+ // If the slot is within the last found object in the cell, the slot is
+ // in a live object.
+ *out_object = object;
+ return true;
+ }
+ return false;
+}
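
The in-cell case is easier to follow on a single 32-bit cell in isolation. A standalone sketch (plain C++, not V8 API), assuming each mark bit flags the word where an object starts: mask off the slot's bit and everything above it, then use the leading-zero count to locate the last object start strictly below the slot.

    #include <cstdint>

    // Returns the bit index of the last mark bit strictly below |bit_index|,
    // or -1 if none exists in this cell; mirrors the slot_mask logic above.
    // Uses a GCC/Clang builtin; bit_index must be in [1, 31].
    int LastMarkedBitBefore(uint32_t cell, unsigned bit_index) {
      uint32_t slot_mask = (1u << bit_index) - 1;  // keep bits below the slot
      uint32_t candidates = cell & slot_mask;
      if (candidates == 0) return -1;  // the object starts in an earlier cell
      return 31 - __builtin_clz(candidates);
    }
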
+
+
+bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
+ // This function does not support large objects right now.
+ Space* owner = p->owner();
+ if (owner == heap_->lo_space() || owner == NULL) return true;
+
+ for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+ Address cell_base = it.CurrentCellBase();
+ MarkBit::CellType* cell = it.CurrentCell();
+
+ MarkBit::CellType current_cell = *cell;
+ if (current_cell == 0) continue;
+
+ int offset = 0;
+ while (current_cell != 0) {
+ int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
+ current_cell >>= trailing_zeros;
+ offset += trailing_zeros;
+ Address address = cell_base + offset * kPointerSize;
+
+ HeapObject* object = HeapObject::FromAddress(address);
+ int size = object->Size();
+
+ if (object->address() > slot) return false;
+ if (object->address() <= slot && slot < (object->address() + size)) {
+ return true;
+ }
+
+ offset++;
+ current_cell >>= 1;
+ }
+ }
+ return false;
+}
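
The inner loop above is the standard shift-by-trailing-zeros walk over the set bits of a word. As a standalone illustration (again not V8 API):

    #include <cstdint>
    #include <cstdio>

    // Visits every set bit of |cell| from lowest to highest, the same way
    // IsSlotInBlackObjectSlow() steps through object start offsets.
    void ForEachSetBit(uint32_t cell) {
      int offset = 0;
      while (cell != 0) {
        int trailing_zeros = __builtin_ctz(cell);  // jump over clear bits
        cell >>= trailing_zeros;
        offset += trailing_zeros;
        printf("object starts at word offset %d\n", offset);
        offset++;    // step past the bit just handled...
        cell >>= 1;  // ...so the next count finds the following one
      }
    }
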
+
+
+bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
+ HeapObject* object = NULL;
+ // The target object is black but we don't know if the source slot is black.
+ // The source object could have died and the slot could be part of a free
+ // space. Find out based on mark bits if the slot is part of a live object.
+ if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) {
+ return false;
+ }
+
+#if V8_DOUBLE_FIELDS_UNBOXING
+ // |object| is NULL only when the slot belongs to large object space.
+ DCHECK(object != NULL ||
+ Page::FromAnyPointerAddress(heap_, slot)->owner() ==
+ heap_->lo_space());
+ // We don't need to check large objects' layout descriptor since it can't
+ // contain in-object fields anyway.
+ if (object != NULL) {
+    // Filter out slots that happen to point to unboxed double fields.
+ LayoutDescriptorHelper helper(object->map());
+ bool has_only_tagged_fields = helper.all_fields_tagged();
+ if (!has_only_tagged_fields &&
+ !helper.IsTagged(static_cast<int>(slot - object->address()))) {
+ return false;
+ }
+ }
+#endif
+
+ return true;
+}
+
+
+void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
+ HeapObject* object) {
+ // The target object has to be black.
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+ // The target object is black but we don't know if the source slot is black.
+ // The source object could have died and the slot could be part of a free
+ // space. Use the mark bit iterator to find out about liveness of the slot.
+ CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
+}
+
+
void MarkCompactCollector::EvacuateNewSpace() {
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. But since we are already in
@@ -3050,6 +3354,7 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
void MarkCompactCollector::EvacuatePages() {
int npages = evacuation_candidates_.length();
+ int abandoned_pages = 0;
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
@@ -3060,12 +3365,13 @@ void MarkCompactCollector::EvacuatePages() {
// Allocate emergency memory for the case when compaction fails due to out
// of memory.
if (!space->HasEmergencyMemory()) {
- space->CreateEmergencyMemory();
+ space->CreateEmergencyMemory(); // If the OS lets us.
}
if (p->IsEvacuationCandidate()) {
- // During compaction we might have to request a new page. Check that we
- // have an emergency page and the space still has room for that.
- if (space->HasEmergencyMemory() && space->CanExpand()) {
+ // During compaction we might have to request a new page in order to free
+ // up a page. Check that we actually got an emergency page above so we
+ // can guarantee that this succeeds.
+ if (space->HasEmergencyMemory()) {
EvacuateLiveObjectsFromPage(p);
// Unlink the page from the list of pages here. We must not iterate
// over that page later (e.g. when scan on scavenge pages are
@@ -3081,6 +3387,7 @@ void MarkCompactCollector::EvacuatePages() {
page->ClearEvacuationCandidate();
page->SetFlag(Page::RESCAN_ON_EVACUATION);
}
+ abandoned_pages = npages - i;
break;
}
}
@@ -3094,6 +3401,16 @@ void MarkCompactCollector::EvacuatePages() {
space->FreeEmergencyMemory();
}
}
+ if (FLAG_trace_fragmentation) {
+ if (abandoned_pages != 0) {
+ PrintF(
+ " Abandon %d out of %d page defragmentations due to lack of "
+ "memory\n",
+ abandoned_pages, npages);
+ } else {
+ PrintF(" Defragmented %d pages\n", npages);
+ }
+ }
}
}
@@ -3218,11 +3535,6 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
}
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
- GDBJITInterface::RemoveCodeRange(free_start, free_end);
- }
-#endif
}
HeapObject* live_object = HeapObject::FromAddress(free_end);
DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
@@ -3252,11 +3564,6 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
}
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
- GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
- }
-#endif
}
p->ResetLiveBytes();
@@ -3439,8 +3746,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
&Heap::ScavengeStoreBufferCallback);
- heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
- &UpdatePointer);
+ heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
{
@@ -3448,7 +3754,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
code_slots_filtering_required);
- if (FLAG_trace_fragmentation) {
+ if (FLAG_trace_fragmentation_verbose) {
PrintF(" migration slots buffer: %d\n",
SlotsBuffer::SizeOfChain(migration_slots_buffer_));
}
@@ -3483,7 +3789,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
if (p->IsEvacuationCandidate()) {
SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
code_slots_filtering_required);
- if (FLAG_trace_fragmentation) {
+ if (FLAG_trace_fragmentation_verbose) {
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
}
@@ -3543,15 +3849,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
}
}
- HeapObjectIterator js_global_property_cell_iterator(
- heap_->property_cell_space());
- for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
- cell = js_global_property_cell_iterator.Next()) {
- if (cell->IsPropertyCell()) {
- PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
- }
- }
-
heap_->string_table()->Iterate(&updating_visitor);
// Update pointers from external string table.
@@ -4159,7 +4456,7 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
}
sweeping_in_progress_ = true;
- if (FLAG_concurrent_sweeping) {
+ if (heap()->concurrent_sweeping_enabled()) {
StartSweeperThreads();
}
}
@@ -4175,12 +4472,11 @@ void MarkCompactCollector::SweepSpaces() {
GCTracer::Scope sweep_scope(heap()->tracer(),
GCTracer::Scope::MC_SWEEP_CELL);
SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
- SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
}
EvacuateNewSpaceAndCandidates();
- // ClearNonLiveTransitions depends on precise sweeping of map space to
+ // ClearNonLiveReferences depends on precise sweeping of map space to
// detect whether unmarked map became dead in this collection or in one
// of the previous ones.
{
@@ -4291,6 +4587,63 @@ bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
}
+void SlotsBuffer::RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer) {
+ // Remove entries by replacing them with an old-space slot containing a smi
+ // that is located in an unmovable page.
+ const ObjectSlot kRemovedEntry = HeapObject::RawField(
+ heap->empty_fixed_array(), FixedArrayBase::kLengthOffset);
+ DCHECK(Page::FromAddress(reinterpret_cast<Address>(kRemovedEntry))
+ ->NeverEvacuate());
+
+ while (buffer != NULL) {
+ SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+ intptr_t slots_count = buffer->idx_;
+
+ for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+ ObjectSlot slot = slots[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ Object* object = *slot;
+ if (object->IsHeapObject()) {
+ if (heap->InNewSpace(object) ||
+ !heap->mark_compact_collector()->IsSlotInLiveObject(
+ reinterpret_cast<Address>(slot))) {
+ slots[slot_idx] = kRemovedEntry;
+ }
+ }
+ } else {
+ ++slot_idx;
+ DCHECK(slot_idx < slots_count);
+ }
+ }
+ buffer = buffer->next();
+ }
+}
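
Note the design choice here: the buffer is not compacted. Typed entries occupy two consecutive positions (type, then address), so removing single entries would break that pairing; dead entries are instead overwritten in place with a known-harmless slot, the length field of the empty fixed array, which lives on a page that is never evacuated. The same idea with plain pointers (a sketch, names illustrative):

    #include <cstddef>

    // Overwrite dead entries with a tombstone instead of compacting, so
    // indices and the pairing of adjacent entries stay valid.
    void TombstoneDeadSlots(void** slots, size_t count, void* tombstone,
                            bool (*is_live)(void*)) {
      for (size_t i = 0; i < count; ++i) {
        if (!is_live(slots[i])) slots[i] = tombstone;
      }
    }
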
+
+
+void SlotsBuffer::VerifySlots(Heap* heap, SlotsBuffer* buffer) {
+ while (buffer != NULL) {
+ SlotsBuffer::ObjectSlot* slots = buffer->slots_;
+ intptr_t slots_count = buffer->idx_;
+
+ for (int slot_idx = 0; slot_idx < slots_count; ++slot_idx) {
+ ObjectSlot slot = slots[slot_idx];
+ if (!IsTypedSlot(slot)) {
+ Object* object = *slot;
+ if (object->IsHeapObject()) {
+ CHECK(!heap->InNewSpace(object));
+ CHECK(heap->mark_compact_collector()->IsSlotInLiveObject(
+ reinterpret_cast<Address>(slot)));
+ }
+ } else {
+ ++slot_idx;
+ DCHECK(slot_idx < slots_count);
+ }
+ }
+ buffer = buffer->next();
+ }
+}
+
+
static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
if (RelocInfo::IsCodeTarget(rmode)) {
return SlotsBuffer::CODE_TARGET_SLOT;
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 589bebf63f..3ffeeed7b1 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -16,6 +16,9 @@ namespace internal {
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
+// Callback function to mark an object in a given heap.
+typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
+
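This typedef decouples the marking loop from a concrete visitor: any function of this shape can drive MarkImplicitRefGroups(). For example, the call site changed in mark-compact.cc above passes the collector's own routine:

    //   MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
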
// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
@@ -360,6 +363,15 @@ class SlotsBuffer {
SlotsBuffer** buffer_address, SlotType type, Address addr,
AdditionMode mode);
+  // Eliminates all stale entries from the slots buffer, i.e., slots that
+  // are not part of live objects anymore. This method must be called after
+  // marking, when the whole transitive closure is known, and before
+  // sweeping, while mark bits are still intact.
+ static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);
+
+ // Ensures that there are no invalid slots in the chain of slots buffers.
+ static void VerifySlots(Heap* heap, SlotsBuffer* buffer);
+
static const int kNumberOfElements = 1021;
private:
@@ -547,6 +559,7 @@ class MarkCompactCollector {
static const uint32_t kMultiFreeEncoding = 1;
static inline bool IsMarked(Object* obj);
+ static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
inline Heap* heap() const { return heap_; }
inline Isolate* isolate() const;
@@ -660,6 +673,10 @@ class MarkCompactCollector {
// to artificially keep AllocationSites alive for a time.
void MarkAllocationSite(AllocationSite* site);
+ // Mark objects in implicit references groups if their parent object
+ // is marked.
+ void MarkImplicitRefGroups(MarkObjectFunction mark_object);
+
MarkingDeque* marking_deque() { return &marking_deque_; }
void EnsureMarkingDequeIsCommittedAndInitialize();
@@ -668,7 +685,13 @@ class MarkCompactCollector {
void UncommitMarkingDeque();
- void OverApproximateWeakClosure();
+  // The following four methods can only be called after marking, when the
+  // whole transitive closure is known. They must be called before sweeping,
+  // while mark bits are still intact.
+ bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
+ bool IsSlotInBlackObjectSlow(Page* p, Address slot);
+ bool IsSlotInLiveObject(Address slot);
+ void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);
private:
class SweeperTask;
@@ -680,6 +703,8 @@ class MarkCompactCollector {
bool WillBeDeoptimized(Code* code);
void RemoveDeadInvalidatedCode();
void ProcessInvalidatedCode(ObjectVisitor* visitor);
+ void ClearInvalidSlotsBufferEntries(PagedSpace* space);
+ void ClearInvalidStoreAndSlotsBufferEntries();
void StartSweeperThreads();
@@ -765,10 +790,6 @@ class MarkCompactCollector {
// the string table are weak.
void MarkStringTable(RootMarkingVisitor* visitor);
- // Mark objects in implicit references groups if their parent object
- // is marked.
- void MarkImplicitRefGroups();
-
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
void ProcessMarkingDeque();
@@ -787,6 +808,10 @@ class MarkCompactCollector {
// otherwise a map can die and deoptimize the code.
void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
+  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
+  // increase the chances of reusing the map transition tree in the future.
+ void RetainMaps();
+
// Mark objects reachable (transitively) from objects in the marking
// stack. This function empties the marking stack, but may leave
// overflowed objects in the heap, in which case the marking stack's
@@ -801,14 +826,13 @@ class MarkCompactCollector {
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
- static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
void ClearNonLiveReferences();
void ClearNonLivePrototypeTransitions(Map* map);
void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
- void ClearMapTransitions(Map* map);
+ void ClearMapTransitions(Map* map, Map* dead_transition);
bool ClearMapBackPointer(Map* map);
void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
int number_of_own_descriptors);
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 58afeae016..872b2dd30c 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -584,18 +584,13 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map,
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
Map* map) {
- // Make sure that the back pointer stored either in the map itself or
- // inside its transitions array is marked. Skip recording the back
- // pointer slot since map space is not compacted.
- StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer()));
-
- // Treat pointers in the transitions array as weak and also mark that
- // array to prevent visiting it later. Skip recording the transition
- // array slot, since it will be implicitly recorded when the pointer
- // fields of this map are visited.
- if (map->HasTransitionArray()) {
- TransitionArray* transitions = map->transitions();
- MarkTransitionArray(heap, transitions);
+ Object* raw_transitions = map->raw_transitions();
+ if (TransitionArray::IsSimpleTransition(raw_transitions)) {
+ StaticVisitor::VisitPointer(
+ heap, HeapObject::RawField(map, Map::kTransitionsOffset));
+ }
+ if (TransitionArray::IsFullTransitionArray(raw_transitions)) {
+ MarkTransitionArray(heap, TransitionArray::cast(raw_transitions));
}
// Since descriptor arrays are potentially shared, ensure that only the
@@ -631,20 +626,18 @@ void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
Heap* heap, TransitionArray* transitions) {
if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
- // Simple transitions do not have keys nor prototype transitions.
- if (transitions->IsSimpleTransition()) return;
-
if (transitions->HasPrototypeTransitions()) {
// Mark prototype transitions array but do not push it onto marking
// stack, this will make references from it weak. We will clean dead
- // prototype transitions in ClearNonLiveTransitions.
+ // prototype transitions in ClearNonLiveReferences.
Object** slot = transitions->GetPrototypeTransitionsSlot();
HeapObject* obj = HeapObject::cast(*slot);
heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
StaticVisitor::MarkObjectWithoutPush(heap, obj);
}
- for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+ int num_transitions = TransitionArray::NumberOfTransitions(transitions);
+ for (int i = 0; i < num_transitions; ++i) {
StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i));
}
}
@@ -849,6 +842,8 @@ void Code::CodeIterateBody(ObjectVisitor* v) {
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
@@ -876,6 +871,8 @@ void Code::CodeIterateBody(Heap* heap) {
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::CELL) |
RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index 7b2e2d9a38..520e539c7c 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -191,15 +191,18 @@ struct WeakListVisitor;
template <class T>
-Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
+Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer,
+ bool stop_after_young) {
Object* undefined = heap->undefined_value();
Object* head = undefined;
T* tail = NULL;
MarkCompactCollector* collector = heap->mark_compact_collector();
bool record_slots = MustRecordSlots(heap);
+
while (list != undefined) {
// Check whether to keep the candidate in the list.
T* candidate = reinterpret_cast<T*>(list);
+
Object* retained = retainer->RetainAs(list);
if (retained != NULL) {
if (head == undefined) {
@@ -220,9 +223,9 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
candidate = reinterpret_cast<T*>(retained);
tail = candidate;
-
// tail is a live object, visit it.
WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
+
} else {
WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
}
@@ -239,6 +242,56 @@ Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
}
+Object* VisitNewArrayBufferViewsWeakList(Heap* heap, Object* list,
+ WeakObjectRetainer* retainer) {
+ Object* undefined = heap->undefined_value();
+ Object* previous = undefined;
+ Object* head = undefined;
+ Object* next;
+ MarkCompactCollector* collector = heap->mark_compact_collector();
+ bool record_slots = MustRecordSlots(heap);
+
+ for (Object* o = list; o != undefined;) {
+ JSArrayBufferView* view = JSArrayBufferView::cast(o);
+ next = view->weak_next();
+ if (!heap->InNewSpace(view)) {
+ if (previous != undefined) {
+ // We are in the middle of the list, skip the old space element.
+ JSArrayBufferView* previous_view = JSArrayBufferView::cast(previous);
+ previous_view->set_weak_next(next);
+ if (record_slots) {
+ Object** next_slot = HeapObject::RawField(
+ previous_view, JSArrayBufferView::kWeakNextOffset);
+ collector->RecordSlot(next_slot, next_slot, next);
+ }
+ }
+ JSArrayBuffer* buffer = JSArrayBuffer::cast(view->buffer());
+ view->set_weak_next(buffer->weak_first_view());
+ if (record_slots) {
+ Object** next_slot =
+ HeapObject::RawField(view, JSArrayBufferView::kWeakNextOffset);
+ collector->RecordSlot(next_slot, next_slot, buffer->weak_first_view());
+ }
+ buffer->set_weak_first_view(view);
+ if (record_slots) {
+ Object** slot =
+ HeapObject::RawField(buffer, JSArrayBuffer::kWeakFirstViewOffset);
+ heap->mark_compact_collector()->RecordSlot(slot, slot, view);
+ }
+ } else {
+ // We found a valid new space view, remember it.
+      // We found a valid new-space view; remember it.
+ if (head == undefined) {
+ // We are at the list head.
+ head = view;
+ }
+ }
+ o = next;
+ }
+ return head;
+}
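
Stripped of the write-barrier bookkeeping, the pass above is a one-pass partition of a singly linked list: nodes that have left new space are unlinked and pushed onto a second list, the rest stay threaded together. A standalone sketch (illustrative, not V8 API):

    struct Node {
      Node* next;
      bool in_new_space;
      Node** buffer_list_head;  // stand-in for the owning buffer's view list
    };

    // Returns the new head of |list|, keeping only new-space nodes and
    // pushing every other node onto the front of its buffer's own list.
    Node* PartitionViews(Node* list) {
      Node* head = nullptr;
      Node* previous = nullptr;
      for (Node* n = list; n != nullptr;) {
        Node* next = n->next;  // save before n->next is repurposed
        if (!n->in_new_space) {
          if (previous != nullptr) previous->next = next;  // unlink mid-list
          n->next = *n->buffer_list_head;  // splice onto the buffer's list
          *n->buffer_list_head = n;
        } else {
          previous = n;
          if (head == nullptr) head = n;
        }
        n = next;
      }
      return head;
    }
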
+
+
template <class T>
static void ClearWeakList(Heap* heap, Object* list) {
Object* undefined = heap->undefined_value();
@@ -316,7 +369,8 @@ struct WeakListVisitor<Context> {
static void DoWeakList(Heap* heap, Context* context,
WeakObjectRetainer* retainer, int index) {
// Visit the weak list, removing dead intermediate elements.
- Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer);
+ Object* list_head =
+ VisitWeakList<T>(heap, context->get(index), retainer, false);
// Update the list head.
context->set(index, list_head, UPDATE_WRITE_BARRIER);
@@ -368,7 +422,7 @@ struct WeakListVisitor<JSArrayBuffer> {
static void VisitLiveObject(Heap* heap, JSArrayBuffer* array_buffer,
WeakObjectRetainer* retainer) {
Object* typed_array_obj = VisitWeakList<JSArrayBufferView>(
- heap, array_buffer->weak_first_view(), retainer);
+ heap, array_buffer->weak_first_view(), retainer, false);
array_buffer->set_weak_first_view(typed_array_obj);
if (typed_array_obj != heap->undefined_value() && MustRecordSlots(heap)) {
Object** slot = HeapObject::RawField(array_buffer,
@@ -399,23 +453,21 @@ struct WeakListVisitor<AllocationSite> {
};
-template Object* VisitWeakList<Code>(Heap* heap, Object* list,
- WeakObjectRetainer* retainer);
-
-
-template Object* VisitWeakList<JSFunction>(Heap* heap, Object* list,
- WeakObjectRetainer* retainer);
-
-
template Object* VisitWeakList<Context>(Heap* heap, Object* list,
- WeakObjectRetainer* retainer);
+ WeakObjectRetainer* retainer,
+ bool stop_after_young);
template Object* VisitWeakList<JSArrayBuffer>(Heap* heap, Object* list,
- WeakObjectRetainer* retainer);
+ WeakObjectRetainer* retainer,
+ bool stop_after_young);
+template Object* VisitWeakList<JSArrayBufferView>(Heap* heap, Object* list,
+ WeakObjectRetainer* retainer,
+ bool stop_after_young);
template Object* VisitWeakList<AllocationSite>(Heap* heap, Object* list,
- WeakObjectRetainer* retainer);
+ WeakObjectRetainer* retainer,
+ bool stop_after_young);
}
} // namespace v8::internal
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index a442867569..2bc90457ac 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -413,6 +413,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
INLINE(static void VisitExternalReference(RelocInfo* rinfo)) {}
+ INLINE(static void VisitInternalReference(RelocInfo* rinfo)) {}
INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) {}
// Skip the weak next code link in a code object.
INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
@@ -489,7 +490,10 @@ class WeakObjectRetainer;
// pointers. The template parameter T is a WeakListVisitor that defines how to
// access the next-element pointers.
template <class T>
-Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
+Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer,
+ bool stop_after_young);
+Object* VisitNewArrayBufferViewsWeakList(Heap* heap, Object* list,
+ WeakObjectRetainer* retainer);
}
} // namespace v8::internal
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index c2ce5fcfaf..a1c31800c9 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -10,7 +10,7 @@
#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
-#include "src/snapshot.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -45,7 +45,6 @@ HeapObjectIterator::HeapObjectIterator(Page* page,
owner == page->heap()->old_data_space() ||
owner == page->heap()->map_space() ||
owner == page->heap()->cell_space() ||
- owner == page->heap()->property_cell_space() ||
owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
page->area_end(), kOnePageOnly, size_func);
@@ -935,9 +934,6 @@ STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
ObjectSpace::kObjectSpaceCellSpace);
-STATIC_ASSERT(
- static_cast<ObjectSpace>(1 << AllocationSpace::PROPERTY_CELL_SPACE) ==
- ObjectSpace::kObjectSpacePropertyCellSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
ObjectSpace::kObjectSpaceMapSpace);
@@ -1021,13 +1017,14 @@ Object* PagedSpace::FindObject(Address addr) {
bool PagedSpace::CanExpand() {
DCHECK(max_capacity_ % AreaSize() == 0);
-
- if (Capacity() == max_capacity_) return false;
-
- DCHECK(Capacity() < max_capacity_);
+ DCHECK(heap()->mark_compact_collector()->is_compacting() ||
+ Capacity() <= heap()->MaxOldGenerationSize());
+ DCHECK(heap()->CommittedOldGenerationMemory() <=
+ heap()->MaxOldGenerationSize() +
+ PagedSpace::MaxEmergencyMemoryAllocated());
// Are we going to exceed capacity for this space?
- if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
+ if (!heap()->CanExpandOldGeneration(Page::kPageSize)) return false;
return true;
}
@@ -1039,7 +1036,7 @@ bool PagedSpace::Expand() {
intptr_t size = AreaSize();
if (anchor_.next_page() == &anchor_) {
- size = Snapshot::SizeOfFirstPage(identity());
+ size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
}
Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
@@ -1049,7 +1046,10 @@ bool PagedSpace::Expand() {
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
- DCHECK(Capacity() <= max_capacity_);
+ DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
+ DCHECK(heap()->CommittedOldGenerationMemory() <=
+ heap()->MaxOldGenerationSize() +
+ PagedSpace::MaxEmergencyMemoryAllocated());
p->InsertAfter(anchor_.prev_page());
@@ -1131,6 +1131,15 @@ void PagedSpace::ReleasePage(Page* page) {
}
+intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
+ // New space and large object space.
+ static const int spaces_without_emergency_memory = 2;
+ static const int spaces_with_emergency_memory =
+ LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
+ return Page::kPageSize * spaces_with_emergency_memory;
+}
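
To make the arithmetic concrete: after this patch removes PROPERTY_CELL_SPACE, the spaces from FIRST_SPACE to LAST_SPACE are new, old pointer, old data, code, cell, map, and large object space, seven in total (a count specific to this V8 version), so this returns Page::kPageSize * (7 - 2): five pages of reserve, one per paged space that can hold emergency memory.
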
+
+
void PagedSpace::CreateEmergencyMemory() {
if (identity() == CODE_SPACE) {
// Make the emergency block available to the allocator.
@@ -1156,6 +1165,11 @@ void PagedSpace::FreeEmergencyMemory() {
void PagedSpace::UseEmergencyMemory() {
+ // Page::Initialize makes the chunk into a real page and adds it to the
+ // accounting for this space. Unlike PagedSpace::Expand, we don't check
+ // CanExpand first, so we can go over the limits a little here. That's OK,
+ // because we are in the process of compacting which will free up at least as
+ // much memory as it allocates.
Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
page->InsertAfter(anchor_.prev_page());
emergency_memory_ = NULL;
@@ -2621,7 +2635,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists.
HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
- if (object != NULL) return object;
+ return object;
}
// Try to expand the space and allocate in the new next page.
@@ -2794,7 +2808,7 @@ void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
// -----------------------------------------------------------------------------
-// CellSpace and PropertyCellSpace implementation
+// CellSpace implementation
// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
// there is at least one non-inlined virtual function. I would prefer to hide
// the VerifyObject definition behind VERIFY_HEAP.
@@ -2802,11 +2816,6 @@ void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
-void PropertyCellSpace::VerifyObject(HeapObject* object) {
- CHECK(object->IsPropertyCell());
-}
-
-
// -----------------------------------------------------------------------------
// LargeObjectIterator
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 2eae02953c..0272d59944 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -385,6 +385,12 @@ class MemoryChunk {
// to grey transition is performed in the value.
HAS_PROGRESS_BAR,
+ // This flag is intended to be used for testing. Works only when both
+ // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
+    // are set. It forces the page to become an evacuation candidate at the
+    // next candidate selection cycle.
+ FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
+
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
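
Given the selection logic added in mark-compact.cc above, a test would arm a page roughly as follows (a sketch; the actual test plumbing is not part of this diff):

    // Requires both --stress-compaction and
    // --manual-evacuation-candidates-selection to have any effect.
    Page* page = Page::FromAddress(object->address());
    page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
    // CollectEvacuationCandidates() clears the flag again and treats the
    // page as maximally fragmented on the next selection cycle.
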
@@ -1871,6 +1877,7 @@ class PagedSpace : public Space {
void CreateEmergencyMemory();
void FreeEmergencyMemory();
void UseEmergencyMemory();
+ intptr_t MaxEmergencyMemoryAllocated();
bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
@@ -2685,31 +2692,6 @@ class CellSpace : public PagedSpace {
// -----------------------------------------------------------------------------
-// Old space for all global object property cell objects
-
-class PropertyCellSpace : public PagedSpace {
- public:
- // Creates a property cell space object with a maximum capacity.
- PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
-
- virtual int RoundSizeDownToObjectAlignment(int size) {
- if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
- return RoundDown(size, PropertyCell::kSize);
- } else {
- return (size / PropertyCell::kSize) * PropertyCell::kSize;
- }
- }
-
- protected:
- virtual void VerifyObject(HeapObject* obj);
-
- public:
- TRACK_MEMORY("PropertyCellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
diff --git a/deps/v8/src/heap/store-buffer-inl.h b/deps/v8/src/heap/store-buffer-inl.h
index 1606465a09..ccbe339534 100644
--- a/deps/v8/src/heap/store-buffer-inl.h
+++ b/deps/v8/src/heap/store-buffer-inl.h
@@ -49,14 +49,6 @@ void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
}
}
}
-
-
-void StoreBuffer::ClearDeadObject(HeapObject* object) {
- Address& map_field = Memory::Address_at(object->address());
- if (heap_->map_space()->Contains(map_field)) {
- map_field = NULL;
- }
-}
}
} // namespace v8::internal
diff --git a/deps/v8/src/heap/store-buffer.cc b/deps/v8/src/heap/store-buffer.cc
index 591d28fe9f..6bf3188717 100644
--- a/deps/v8/src/heap/store-buffer.cc
+++ b/deps/v8/src/heap/store-buffer.cc
@@ -6,7 +6,6 @@
#include "src/v8.h"
-#include "src/base/atomicops.h"
#include "src/counters.h"
#include "src/heap/store-buffer-inl.h"
@@ -108,26 +107,6 @@ void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
}
-void StoreBuffer::Uniq() {
- // Remove adjacent duplicates and cells that do not point at new space.
- Address previous = NULL;
- Address* write = old_start_;
- DCHECK(may_move_store_buffer_entries_);
- for (Address* read = old_start_; read < old_top_; read++) {
- Address current = *read;
- if (current != previous) {
- Object* object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(current)));
- if (heap_->InNewSpace(object)) {
- *write++ = current;
- }
- }
- previous = current;
- }
- old_top_ = write;
-}
-
-
bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
return old_limit_ - old_top_ >= space_needed;
}
@@ -247,20 +226,6 @@ void StoreBuffer::Filter(int flag) {
}
-void StoreBuffer::SortUniq() {
- Compact();
- if (old_buffer_is_sorted_) return;
- std::sort(old_start_, old_top_);
- Uniq();
-
- old_buffer_is_sorted_ = true;
-
- // Filtering hash sets are inconsistent with the store buffer after this
- // operation.
- ClearFilteringHashSets();
-}
-
-
bool StoreBuffer::PrepareForIteration() {
Compact();
PointerChunkIterator it(heap_);
@@ -285,41 +250,6 @@ bool StoreBuffer::PrepareForIteration() {
}
-#ifdef DEBUG
-void StoreBuffer::Clean() {
- ClearFilteringHashSets();
- Uniq(); // Also removes things that no longer point to new space.
- EnsureSpace(kStoreBufferSize / 2);
-}
-
-
-static Address* in_store_buffer_1_element_cache = NULL;
-
-
-bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
- if (!FLAG_enable_slow_asserts) return true;
- if (in_store_buffer_1_element_cache != NULL &&
- *in_store_buffer_1_element_cache == cell_address) {
- return true;
- }
- Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
- for (Address* current = top - 1; current >= start_; current--) {
- if (*current == cell_address) {
- in_store_buffer_1_element_cache = current;
- return true;
- }
- }
- for (Address* current = old_top_ - 1; current >= old_start_; current--) {
- if (*current == cell_address) {
- in_store_buffer_1_element_cache = current;
- return true;
- }
- }
- return false;
-}
-#endif
-
-
void StoreBuffer::ClearFilteringHashSets() {
if (!hash_sets_are_empty_) {
memset(reinterpret_cast<void*>(hash_set_1_), 0,
@@ -350,8 +280,7 @@ void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
// When we are not in GC the Heap::InNewSpace() predicate
// checks that pointers which satisfy the predicate point into
// the active semispace.
- Object* object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+ Object* object = *slot;
heap_->InNewSpace(object);
slot_address += kPointerSize;
}
@@ -378,33 +307,40 @@ void StoreBuffer::GCEpilogue() {
}
+void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
+ ObjectSlotCallback slot_callback) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ Object* object = *slot;
+
+ // If the object is not in from space, it must be a duplicate store buffer
+ // entry and the slot was already updated.
+ if (heap_->InFromSpace(object)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ DCHECK(heap_object->IsHeapObject());
+ slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+ object = *slot;
+ // If the object was in from space before the callback ran and is in
+ // to space afterwards, the object is still live.
+ // Unfortunately, we do not know about the slot itself. It could lie
+ // inside a just-freed free-space object.
+ if (heap_->InToSpace(object)) {
+ EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
+ }
+ }
+}
+
+
void StoreBuffer::FindPointersToNewSpaceInRegion(
- Address start, Address end, ObjectSlotCallback slot_callback,
- bool clear_maps) {
+ Address start, Address end, ObjectSlotCallback slot_callback) {
for (Address slot_address = start; slot_address < end;
slot_address += kPointerSize) {
- Object** slot = reinterpret_cast<Object**>(slot_address);
- Object* object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
- if (heap_->InNewSpace(object)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- DCHECK(heap_object->IsHeapObject());
- // The new space object was not promoted if it still contains a map
- // pointer. Clear the map field now lazily.
- if (clear_maps) ClearDeadObject(heap_object);
- slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
- object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
- if (heap_->InNewSpace(object)) {
- EnterDirectlyIntoStoreBuffer(slot_address);
- }
- }
+ ProcessOldToNewSlot(slot_address, slot_callback);
}
}
-void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
- bool clear_maps) {
+void StoreBuffer::IteratePointersInStoreBuffer(
+ ObjectSlotCallback slot_callback) {
Address* limit = old_top_;
old_top_ = old_start_;
{
@@ -413,40 +349,57 @@ void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
#ifdef DEBUG
Address* saved_top = old_top_;
#endif
- Object** slot = reinterpret_cast<Object**>(*current);
- Object* object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
- if (heap_->InFromSpace(object)) {
- HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
- // The new space object was not promoted if it still contains a map
- // pointer. Clear the map field now lazily.
- if (clear_maps) ClearDeadObject(heap_object);
- slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
- object = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
- if (heap_->InNewSpace(object)) {
- EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
- }
- }
+ ProcessOldToNewSlot(*current, slot_callback);
DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
}
}
}
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
- IteratePointersToNewSpace(slot_callback, false);
+void StoreBuffer::ClearInvalidStoreBufferEntries() {
+ Compact();
+ Address* new_top = old_start_;
+ for (Address* current = old_start_; current < old_top_; current++) {
+ Address addr = *current;
+ Object** slot = reinterpret_cast<Object**>(addr);
+ Object* object = *slot;
+ if (heap_->InNewSpace(object) && object->IsHeapObject()) {
+ // If the target object is not black, the source slot must be part
+ // of a non-black (dead) object.
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (Marking::IsBlack(Marking::MarkBitFrom(heap_object)) &&
+ heap_->mark_compact_collector()->IsSlotInLiveObject(addr)) {
+ *new_top++ = addr;
+ }
+ }
+ }
+ old_top_ = new_top;
+ ClearFilteringHashSets();
+
+ // Clear the scan_on_scavenge flag for dead large objects.
+ LargeObjectIterator it(heap_->lo_space());
+ for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ if (chunk->scan_on_scavenge() && !Marking::MarkBitFrom(object).Get()) {
+ chunk->set_scan_on_scavenge(false);
+ }
+ }
}
-void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
- ObjectSlotCallback slot_callback) {
- IteratePointersToNewSpace(slot_callback, true);
+void StoreBuffer::VerifyValidStoreBufferEntries() {
+ for (Address* current = old_start_; current < old_top_; current++) {
+ Object** slot = reinterpret_cast<Object**>(*current);
+ Object* object = *slot;
+ CHECK(object->IsHeapObject());
+ CHECK(heap_->InNewSpace(object));
+ heap_->mark_compact_collector()->VerifyIsSlotInLiveObject(
+ reinterpret_cast<Address>(slot), HeapObject::cast(object));
+ }
}
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
- bool clear_maps) {
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
// We do not sort or remove duplicated entries from the store buffer because
// we expect that the callback will rebuild the store buffer, thus removing
// all duplicates and pointers to old space.
@@ -455,7 +408,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
// TODO(gc): we want to skip slots on evacuation candidates
// but we can't simply figure that out from the slot address
// because the slot can belong to a large object.
- IteratePointersInStoreBuffer(slot_callback, clear_maps);
+ IteratePointersInStoreBuffer(slot_callback);
// We are done scanning all the pointers that were in the store buffer, but
// there may be some pages marked scan_on_scavenge that have pointers to new
@@ -484,7 +437,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
DCHECK(array->IsFixedArray());
Address start = array->address();
Address end = start + array->Size();
- FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
+ FindPointersToNewSpaceInRegion(start, end, slot_callback);
} else {
Page* page = reinterpret_cast<Page*>(chunk);
PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
@@ -499,7 +452,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
FindPointersToNewSpaceInRegion(
heap_object->address() + Map::kPointerFieldsBeginOffset,
heap_object->address() + Map::kPointerFieldsEndOffset,
- slot_callback, clear_maps);
+ slot_callback);
}
}
} else {
@@ -534,8 +487,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
&end_of_region_offset)) {
FindPointersToNewSpaceInRegion(
obj_address + offset,
- obj_address + end_of_region_offset, slot_callback,
- clear_maps);
+ obj_address + end_of_region_offset, slot_callback);
}
offset = end_of_region_offset;
}
@@ -545,7 +497,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
Address end_address = obj_address + end_offset;
// Object has only tagged fields.
FindPointersToNewSpaceInRegion(start_address, end_address,
- slot_callback, clear_maps);
+ slot_callback);
#if V8_DOUBLE_FIELDS_UNBOXING
}
#endif
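The new ClearInvalidStoreBufferEntries compacts the buffer in place with a classic two-pointer read/write filter: an entry survives only if its target is still a new-space heap object, the target is marked black, and the slot itself lies inside a live object. A self-contained sketch of just that filtering shape, with stand-in predicates for the V8 calls used above (heap_->InNewSpace(), Marking::IsBlack(), IsSlotInLiveObject()):

    #include <cstddef>

    using Address = unsigned long;
    using SlotPredicate = bool (*)(Address slot);

    // In-place two-pointer filter, mirroring ClearInvalidStoreBufferEntries:
    // keep an entry only if every predicate holds, then shrink the logical top.
    size_t FilterEntries(Address* start, size_t count,
                         SlotPredicate target_is_live_new_space_object,
                         SlotPredicate slot_is_inside_live_object) {
      Address* write = start;
      for (Address* read = start; read < start + count; ++read) {
        if (target_is_live_new_space_object(*read) &&
            slot_is_inside_live_object(*read)) {
          *write++ = *read;
        }
      }
      return static_cast<size_t>(write - start);  // new entry count
    }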
diff --git a/deps/v8/src/heap/store-buffer.h b/deps/v8/src/heap/store-buffer.h
index 5efd6922bc..6c571fcdce 100644
--- a/deps/v8/src/heap/store-buffer.h
+++ b/deps/v8/src/heap/store-buffer.h
@@ -20,8 +20,7 @@ class StoreBuffer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
typedef void (StoreBuffer::*RegionCallback)(Address start, Address end,
- ObjectSlotCallback slot_callback,
- bool clear_maps);
+ ObjectSlotCallback slot_callback);
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
@@ -60,10 +59,6 @@ class StoreBuffer {
// surviving old-to-new pointers into the store buffer to rebuild it.
void IteratePointersToNewSpace(ObjectSlotCallback callback);
- // Same as IteratePointersToNewSpace but additonally clears maps in objects
- // referenced from the store buffer that do not contain a forwarding pointer.
- void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);
-
static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
@@ -88,23 +83,20 @@ class StoreBuffer {
bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
- // Goes through the store buffer removing pointers to things that have
- // been promoted. Rebuilds the store buffer completely if it overflowed.
- void SortUniq();
-
void EnsureSpace(intptr_t space_needed);
void Verify();
bool PrepareForIteration();
-#ifdef DEBUG
- void Clean();
- // Slow, for asserts only.
- bool CellIsInStoreBuffer(Address cell);
-#endif
-
void Filter(int flag);
+ // Eliminates all stale entries from the store buffer, i.e., slots that
+ // are no longer part of live objects. This method must be called after
+ // marking, when the whole transitive closure is known, and before
+ // sweeping, while mark bits are still intact.
+ void ClearInvalidStoreBufferEntries();
+ void VerifyValidStoreBufferEntries();
+
private:
Heap* heap_;
@@ -142,17 +134,13 @@ class StoreBuffer {
void ClearFilteringHashSets();
bool SpaceAvailable(intptr_t space_needed);
- void Uniq();
void ExemptPopularPages(int prime_sample_step, int threshold);
- // Set the map field of the object to NULL if contains a map.
- inline void ClearDeadObject(HeapObject* object);
-
- void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);
+ void ProcessOldToNewSlot(Address slot_address,
+ ObjectSlotCallback slot_callback);
void FindPointersToNewSpaceInRegion(Address start, Address end,
- ObjectSlotCallback slot_callback,
- bool clear_maps);
+ ObjectSlotCallback slot_callback);
// For each region of pointers on a page in use from an old space call
// visit_pointer_region callback.
@@ -163,8 +151,7 @@ class StoreBuffer {
RegionCallback region_callback,
ObjectSlotCallback slot_callback);
- void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
- bool clear_maps);
+ void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback);
#ifdef VERIFY_HEAP
void VerifyPointers(LargeObjectSpace* space);
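The RegionCallback typedef at the top of this header is a pointer to a StoreBuffer member function, invoked with the slightly unusual ->* syntax; the signature change above only drops the clear_maps flag. A small sketch of that dispatch pattern against a trimmed-down class (illustrative names, not the real StoreBuffer):

    #include <cstdio>

    struct StoreBuffer;
    using Address = unsigned long;
    using ObjectSlotCallback = void (*)(void** slot, void* target);
    typedef void (StoreBuffer::*RegionCallback)(Address start, Address end,
                                                ObjectSlotCallback slot_callback);

    struct StoreBuffer {
      void FindPointersToNewSpaceInRegion(Address start, Address end,
                                          ObjectSlotCallback) {
        std::printf("scan [%lx, %lx)\n", start, end);
      }
      void VisitRegion(RegionCallback region_callback, Address start,
                       Address end, ObjectSlotCallback slot_callback) {
        (this->*region_callback)(start, end, slot_callback);  // member-ptr call
      }
    };

    int main() {
      StoreBuffer sb;
      sb.VisitRegion(&StoreBuffer::FindPointersToNewSpaceInRegion, 0x1000,
                     0x2000, nullptr);
    }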
diff --git a/deps/v8/src/hydrogen-bce.cc b/deps/v8/src/hydrogen-bce.cc
index 3bf8e9f039..9d634a8fa8 100644
--- a/deps/v8/src/hydrogen-bce.cc
+++ b/deps/v8/src/hydrogen-bce.cc
@@ -380,7 +380,7 @@ BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
if (!i->IsBoundsCheck()) continue;
HBoundsCheck* check = HBoundsCheck::cast(i);
- int32_t offset;
+ int32_t offset = 0;
BoundsCheckKey* key =
BoundsCheckKey::Create(zone(), check, &offset);
if (key == NULL) continue;
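The one-line hydrogen-bce.cc change initializes offset before BoundsCheckKey::Create() runs; the out-parameter is only written on success, so the initialization gives the failure path a defined value (most likely silencing an uninitialized-variable warning, since the key == NULL case continues immediately). A minimal reproduction of the hazard and the fix, using an invented factory (hypothetical names):

    #include <cstdint>

    // Hypothetical factory that writes *offset only when it succeeds.
    bool CreateKey(bool ok, int32_t* offset) {
      if (!ok) return false;  // failure: *offset left untouched
      *offset = 42;
      return true;
    }

    int32_t Use(bool ok) {
      int32_t offset = 0;  // the fix: a defined value on every path
      if (!CreateKey(ok, &offset)) {
        // Without the initializer, reading offset past this point would
        // read an indeterminate value.
      }
      return offset;
    }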
diff --git a/deps/v8/src/hydrogen-gvn.cc b/deps/v8/src/hydrogen-gvn.cc
index da986e34cb..2ada8976ef 100644
--- a/deps/v8/src/hydrogen-gvn.cc
+++ b/deps/v8/src/hydrogen-gvn.cc
@@ -346,17 +346,20 @@ SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
int index;
SideEffects result(instr->ChangesFlags());
if (result.ContainsFlag(kGlobalVars)) {
- if (instr->IsStoreGlobalCell() &&
- ComputeGlobalVar(HStoreGlobalCell::cast(instr)->cell(), &index)) {
- result.RemoveFlag(kGlobalVars);
- result.AddSpecial(GlobalVar(index));
- } else {
- for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ if (instr->IsStoreNamedField()) {
+ HStoreNamedField* store = HStoreNamedField::cast(instr);
+ HConstant* target = HConstant::cast(store->object());
+ if (ComputeGlobalVar(Unique<PropertyCell>::cast(target->GetUnique()),
+ &index)) {
+ result.RemoveFlag(kGlobalVars);
result.AddSpecial(GlobalVar(index));
+ return result;
}
}
- }
- if (result.ContainsFlag(kInobjectFields)) {
+ for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ result.AddSpecial(GlobalVar(index));
+ }
+ } else if (result.ContainsFlag(kInobjectFields)) {
if (instr->IsStoreNamedField() &&
ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
result.RemoveFlag(kInobjectFields);
@@ -375,17 +378,20 @@ SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
int index;
SideEffects result(instr->DependsOnFlags());
if (result.ContainsFlag(kGlobalVars)) {
- if (instr->IsLoadGlobalCell() &&
- ComputeGlobalVar(HLoadGlobalCell::cast(instr)->cell(), &index)) {
- result.RemoveFlag(kGlobalVars);
- result.AddSpecial(GlobalVar(index));
- } else {
- for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ if (instr->IsLoadNamedField()) {
+ HLoadNamedField* load = HLoadNamedField::cast(instr);
+ HConstant* target = HConstant::cast(load->object());
+ if (ComputeGlobalVar(Unique<PropertyCell>::cast(target->GetUnique()),
+ &index)) {
+ result.RemoveFlag(kGlobalVars);
result.AddSpecial(GlobalVar(index));
+ return result;
}
}
- }
- if (result.ContainsFlag(kInobjectFields)) {
+ for (index = 0; index < kNumberOfGlobalVars; ++index) {
+ result.AddSpecial(GlobalVar(index));
+ }
+ } else if (result.ContainsFlag(kInobjectFields)) {
if (instr->IsLoadNamedField() &&
ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
result.RemoveFlag(kInobjectFields);
@@ -439,7 +445,8 @@ GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
}
-bool SideEffectsTracker::ComputeGlobalVar(Unique<Cell> cell, int* index) {
+bool SideEffectsTracker::ComputeGlobalVar(Unique<PropertyCell> cell,
+ int* index) {
for (int i = 0; i < num_global_vars_; ++i) {
if (cell == global_vars_[i]) {
*index = i;
diff --git a/deps/v8/src/hydrogen-gvn.h b/deps/v8/src/hydrogen-gvn.h
index d04a6eb3aa..542bc8a52b 100644
--- a/deps/v8/src/hydrogen-gvn.h
+++ b/deps/v8/src/hydrogen-gvn.h
@@ -70,7 +70,7 @@ class SideEffectsTracker FINAL BASE_EMBEDDED {
private:
friend std::ostream& operator<<(std::ostream& os, const TrackedEffects& f);
- bool ComputeGlobalVar(Unique<Cell> cell, int* index);
+ bool ComputeGlobalVar(Unique<PropertyCell> cell, int* index);
bool ComputeInobjectField(HObjectAccess access, int* index);
static int GlobalVar(int index) {
@@ -86,7 +86,7 @@ class SideEffectsTracker FINAL BASE_EMBEDDED {
// Track up to four global vars.
static const int kNumberOfGlobalVars = 4;
- Unique<Cell> global_vars_[kNumberOfGlobalVars];
+ Unique<PropertyCell> global_vars_[kNumberOfGlobalVars];
int num_global_vars_;
// Track up to n inobject fields.
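With global loads and stores now expressed as named-field accesses on a PropertyCell constant, the tracker keys its per-variable special flags on Unique<PropertyCell> and, when the cell is not one of the tracked four, pessimistically touches all kNumberOfGlobalVars flags instead. The indexing idea in isolation, as a hedged sketch rather than the V8 types:

    #include <array>

    // Track up to four distinct cells; each gets a dedicated flag index.
    struct GlobalVarTracker {
      static constexpr int kNumberOfGlobalVars = 4;
      std::array<const void*, kNumberOfGlobalVars> cells{};
      int num_cells = 0;

      // Returns true and sets *index if the cell is (or becomes) tracked.
      bool ComputeGlobalVar(const void* cell, int* index) {
        for (int i = 0; i < num_cells; ++i) {
          if (cells[i] == cell) { *index = i; return true; }
        }
        if (num_cells < kNumberOfGlobalVars) {
          cells[num_cells] = cell;
          *index = num_cells++;
          return true;
        }
        return false;  // caller must touch every global-var flag instead
      }
    };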
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index b89bcc4d99..245c7c0403 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -871,7 +871,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kInvokeFunction:
case HValue::kLoadContextSlot:
case HValue::kLoadFunctionPrototype:
- case HValue::kLoadGlobalCell:
case HValue::kLoadKeyed:
case HValue::kLoadKeyedGeneric:
case HValue::kMathFloorOfDiv:
@@ -887,7 +886,6 @@ bool HInstruction::CanDeoptimize() {
case HValue::kSimulate:
case HValue::kStackCheck:
case HValue::kStoreContextSlot:
- case HValue::kStoreGlobalCell:
case HValue::kStoreKeyedGeneric:
case HValue::kStringAdd:
case HValue::kStringCompareAndBranch:
@@ -2946,7 +2944,7 @@ Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
HConstant(DoubleToInt32(double_value_), Representation::Integer32(),
NotInNewSpace(), object_);
}
- return Maybe<HConstant*>(res != NULL, res);
+ return res != NULL ? Just(res) : Nothing<HConstant*>();
}
@@ -2962,7 +2960,7 @@ Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Isolate* isolate,
} else if (handle->IsNull()) {
res = new(zone) HConstant(0);
}
- return Maybe<HConstant*>(res != NULL, res);
+ return res != NULL ? Just(res) : Nothing<HConstant*>();
}
@@ -3624,24 +3622,6 @@ std::ostream& HTransitionElementsKind::PrintDataTo(
}
-std::ostream& HLoadGlobalCell::PrintDataTo(std::ostream& os) const { // NOLINT
- os << "[" << *cell().handle() << "]";
- if (details_.IsConfigurable()) os << " (configurable)";
- if (details_.IsReadOnly()) os << " (read-only)";
- return os;
-}
-
-
-bool HLoadGlobalCell::RequiresHoleCheck() const {
- if (!details_.IsConfigurable()) return false;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- if (!use->IsChange()) return true;
- }
- return false;
-}
-
-
std::ostream& HLoadGlobalGeneric::PrintDataTo(
std::ostream& os) const { // NOLINT
return os << name()->ToCString().get() << " ";
@@ -3655,14 +3635,6 @@ std::ostream& HInnerAllocatedObject::PrintDataTo(
}
-std::ostream& HStoreGlobalCell::PrintDataTo(std::ostream& os) const { // NOLINT
- os << "[" << *cell().handle() << "] = " << NameOf(value());
- if (details_.IsConfigurable()) os << " (configurable)";
- if (details_.IsReadOnly()) os << " (read-only)";
- return os;
-}
-
-
std::ostream& HLoadContextSlot::PrintDataTo(std::ostream& os) const { // NOLINT
return os << NameOf(value()) << "[" << slot_index() << "]";
}
@@ -3788,12 +3760,12 @@ bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
}
}
- DCHECK((IsNewSpaceAllocation() &&
- dominator_allocate->IsNewSpaceAllocation()) ||
- (IsOldDataSpaceAllocation() &&
- dominator_allocate->IsOldDataSpaceAllocation()) ||
- (IsOldPointerSpaceAllocation() &&
- dominator_allocate->IsOldPointerSpaceAllocation()));
+ DCHECK(
+ (IsNewSpaceAllocation() && dominator_allocate->IsNewSpaceAllocation()) ||
+ (IsOldDataSpaceAllocation() &&
+ dominator_allocate->IsOldDataSpaceAllocation()) ||
+ (IsOldPointerSpaceAllocation() &&
+ dominator_allocate->IsOldPointerSpaceAllocation()));
// First update the size of the dominator allocate instruction.
dominator_size = dominator_allocate->size();
@@ -3889,8 +3861,8 @@ HAllocate* HAllocate::GetFoldableDominator(HAllocate* dominator) {
// We cannot hoist old space allocations over new space allocations.
if (IsNewSpaceAllocation() || dominator->IsNewSpaceAllocation()) {
if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), new space hoisting\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ PrintF("#%d (%s) cannot fold into #%d (%s), new space hoisting\n", id(),
+ Mnemonic(), dominator->id(), dominator->Mnemonic());
}
return NULL;
}
@@ -3903,8 +3875,8 @@ HAllocate* HAllocate::GetFoldableDominator(HAllocate* dominator) {
if (dominator_dominator == NULL) {
dominating_allocate_ = dominator;
if (FLAG_trace_allocation_folding) {
- PrintF("#%d (%s) cannot fold into #%d (%s), different spaces\n",
- id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+ PrintF("#%d (%s) cannot fold into #%d (%s), different spaces\n", id(),
+ Mnemonic(), dominator->id(), dominator->Mnemonic());
}
return NULL;
}
@@ -3917,16 +3889,16 @@ HAllocate* HAllocate::GetFoldableDominator(HAllocate* dominator) {
if (block()->block_id() != dominator_dominator->block()->block_id()) {
if (FLAG_trace_allocation_folding) {
PrintF("#%d (%s) cannot fold into #%d (%s), different basic blocks\n",
- id(), Mnemonic(), dominator_dominator->id(),
- dominator_dominator->Mnemonic());
+ id(), Mnemonic(), dominator_dominator->id(),
+ dominator_dominator->Mnemonic());
}
return NULL;
}
DCHECK((IsOldDataSpaceAllocation() &&
- dominator_dominator->IsOldDataSpaceAllocation()) ||
+ dominator_dominator->IsOldDataSpaceAllocation()) ||
(IsOldPointerSpaceAllocation() &&
- dominator_dominator->IsOldPointerSpaceAllocation()));
+ dominator_dominator->IsOldPointerSpaceAllocation()));
int32_t current_size = HConstant::cast(size())->GetInteger32Constant();
HStoreNamedField* dominator_free_space_size =
@@ -4695,12 +4667,6 @@ HObjectAccess HObjectAccess::ForField(Handle<Map> map, int index,
}
-HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
- return HObjectAccess(kInobject, Cell::kValueOffset, Representation::Tagged(),
- isolate->factory()->cell_value_string());
-}
-
-
void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
// set the appropriate GVN flags for a given load or store instruction
if (access_type == STORE) {
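Several call sites above migrate from the old two-argument Maybe<T> constructor to the Just()/Nothing() factories, matching a Maybe that is queried with IsJust()/FromJust() (as the hydrogen-representation-changes.cc hunk below does). A minimal standalone analogue of that pattern, with a simplified Maybe standing in for V8's:

    #include <cassert>

    // Simplified stand-in for v8's Maybe<T>: a value that may be absent.
    template <typename T>
    struct Maybe {
      bool has_value;
      T value;
      bool IsJust() const { return has_value; }
      T FromJust() const { assert(has_value); return value; }
    };

    template <typename T>
    Maybe<T> Just(T value) { return Maybe<T>{true, value}; }

    template <typename T>
    Maybe<T> Nothing() { return Maybe<T>{false, T()}; }

    int* CopyIfValid(int* res) {
      // Mirrors: return res != NULL ? Just(res) : Nothing<HConstant*>();
      Maybe<int*> maybe = res != nullptr ? Just(res) : Nothing<int*>();
      return maybe.IsJust() ? maybe.FromJust() : nullptr;
    }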
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 8fd2aed0af..2461ee46b5 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -117,7 +117,6 @@ class LChunkBuilder;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@@ -146,7 +145,6 @@ class LChunkBuilder;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -982,7 +980,7 @@ class HPositionInfo {
if (has_operand_positions()) {
return operand_positions()[kInstructionPosIndex];
}
- return SourcePosition(static_cast<int>(UntagPosition(data_)));
+ return SourcePosition::FromRaw(static_cast<int>(UntagPosition(data_)));
}
void set_position(SourcePosition pos) {
@@ -1961,6 +1959,8 @@ class HEnterInlined FINAL : public HTemplateInstruction<0> {
FunctionLiteral* function() const { return function_; }
InliningKind inlining_kind() const { return inlining_kind_; }
BailoutId ReturnId() const { return return_id_; }
+ int inlining_id() const { return inlining_id_; }
+ void set_inlining_id(int inlining_id) { inlining_id_ = inlining_id; }
Representation RequiredInputRepresentation(int index) OVERRIDE {
return Representation::None();
@@ -1984,6 +1984,7 @@ class HEnterInlined FINAL : public HTemplateInstruction<0> {
arguments_pushed_(false),
function_(function),
inlining_kind_(inlining_kind),
+ inlining_id_(0),
arguments_var_(arguments_var),
arguments_object_(arguments_object),
return_targets_(2, zone) {}
@@ -1995,6 +1996,7 @@ class HEnterInlined FINAL : public HTemplateInstruction<0> {
bool arguments_pushed_;
FunctionLiteral* function_;
InliningKind inlining_kind_;
+ int inlining_id_;
Variable* arguments_var_;
HArgumentsObject* arguments_object_;
ZoneList<HBasicBlock*> return_targets_;
@@ -2572,7 +2574,9 @@ class HUnaryMathOperation FINAL : public HTemplateInstruction<2> {
// Math.round.
bool SupportsFlexibleFloorAndRound() const {
#ifdef V8_TARGET_ARCH_ARM64
- return true;
+ // TODO(rmcilroy): Re-enable this for Arm64 once http://crbug.com/476477 is
+ // fixed.
+ return false;
#else
return false;
#endif
@@ -3497,7 +3501,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
bool IsCell() const {
InstanceType instance_type = GetInstanceType();
- return instance_type == CELL_TYPE || instance_type == PROPERTY_CELL_TYPE;
+ return instance_type == CELL_TYPE;
}
Representation RequiredInputRepresentation(int index) OVERRIDE {
@@ -5410,46 +5414,6 @@ class HUnknownOSRValue FINAL : public HTemplateInstruction<0> {
};
-class HLoadGlobalCell FINAL : public HTemplateInstruction<0> {
- public:
- DECLARE_INSTRUCTION_FACTORY_P2(HLoadGlobalCell, Handle<Cell>,
- PropertyDetails);
-
- Unique<Cell> cell() const { return cell_; }
- bool RequiresHoleCheck() const;
-
- std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE; // NOLINT
-
- intptr_t Hashcode() OVERRIDE { return cell_.Hashcode(); }
-
- void FinalizeUniqueness() OVERRIDE { cell_ = Unique<Cell>(cell_.handle()); }
-
- Representation RequiredInputRepresentation(int index) OVERRIDE {
- return Representation::None();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
-
- protected:
- bool DataEquals(HValue* other) OVERRIDE {
- return cell_ == HLoadGlobalCell::cast(other)->cell_;
- }
-
- private:
- HLoadGlobalCell(Handle<Cell> cell, PropertyDetails details)
- : cell_(Unique<Cell>::CreateUninitialized(cell)), details_(details) {
- set_representation(Representation::Tagged());
- SetFlag(kUseGVN);
- SetDependsOnFlag(kGlobalVars);
- }
-
- bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
-
- Unique<Cell> cell_;
- PropertyDetails details_;
-};
-
-
class HLoadGlobalGeneric FINAL : public HTemplateInstruction<2> {
public:
DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
@@ -5623,9 +5587,10 @@ class HAllocate FINAL : public HTemplateInstruction<2> {
static Flags ComputeFlags(PretenureFlag pretenure_flag,
InstanceType instance_type) {
Flags flags = pretenure_flag == TENURED
- ? (Heap::TargetSpaceId(instance_type) == OLD_POINTER_SPACE
- ? ALLOCATE_IN_OLD_POINTER_SPACE : ALLOCATE_IN_OLD_DATA_SPACE)
- : ALLOCATE_IN_NEW_SPACE;
+ ? (Heap::TargetSpaceId(instance_type) == OLD_POINTER_SPACE
+ ? ALLOCATE_IN_OLD_POINTER_SPACE
+ : ALLOCATE_IN_OLD_DATA_SPACE)
+ : ALLOCATE_IN_NEW_SPACE;
if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
flags = static_cast<Flags>(flags | ALLOCATE_DOUBLE_ALIGNED);
}
@@ -5667,8 +5632,9 @@ class HAllocate FINAL : public HTemplateInstruction<2> {
bool IsFoldable(HAllocate* allocate) {
return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) ||
- (IsOldDataSpaceAllocation() && allocate->IsOldDataSpaceAllocation()) ||
- (IsOldPointerSpaceAllocation() &&
+ (IsOldDataSpaceAllocation() &&
+ allocate->IsOldDataSpaceAllocation()) ||
+ (IsOldPointerSpaceAllocation() &&
allocate->IsOldPointerSpaceAllocation());
}
@@ -5801,43 +5767,6 @@ inline PointersToHereCheck PointersToHereCheckForObject(HValue* object,
}
-class HStoreGlobalCell FINAL : public HUnaryOperation {
- public:
- DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
- Handle<PropertyCell>, PropertyDetails);
-
- Unique<PropertyCell> cell() const { return cell_; }
- bool RequiresHoleCheck() { return details_.IsConfigurable(); }
- bool NeedsWriteBarrier() {
- return StoringValueNeedsWriteBarrier(value());
- }
-
- void FinalizeUniqueness() OVERRIDE {
- cell_ = Unique<PropertyCell>(cell_.handle());
- }
-
- Representation RequiredInputRepresentation(int index) OVERRIDE {
- return Representation::Tagged();
- }
- std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE; // NOLINT
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
-
- private:
- HStoreGlobalCell(HValue* value,
- Handle<PropertyCell> cell,
- PropertyDetails details)
- : HUnaryOperation(value),
- cell_(Unique<PropertyCell>::CreateUninitialized(cell)),
- details_(details) {
- SetChangesFlag(kGlobalVars);
- }
-
- Unique<PropertyCell> cell_;
- PropertyDetails details_;
-};
-
-
class HLoadContextSlot FINAL : public HUnaryOperation {
public:
enum Mode {
@@ -6222,9 +6151,6 @@ class HObjectAccess FINAL {
Representation representation,
Handle<String> name);
- // Create an access for the payload of a Cell or JSGlobalPropertyCell.
- static HObjectAccess ForCellPayload(Isolate* isolate);
-
static HObjectAccess ForJSTypedArrayLength() {
return HObjectAccess::ForObservableJSObjectOffset(
JSTypedArray::kLengthOffset);
@@ -6509,13 +6435,16 @@ class HLoadNamedField FINAL : public HTemplateInstruction<2> {
class HLoadNamedGeneric FINAL : public HTemplateInstruction<2> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadNamedGeneric, HValue*,
- Handle<Object>);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadNamedGeneric, HValue*,
+ Handle<Object>, InlineCacheState);
HValue* context() const { return OperandAt(0); }
HValue* object() const { return OperandAt(1); }
Handle<Object> name() const { return name_; }
+ InlineCacheState initialization_state() const {
+ return initialization_state_;
+ }
FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
@@ -6537,8 +6466,11 @@ class HLoadNamedGeneric FINAL : public HTemplateInstruction<2> {
DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
private:
- HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
- : name_(name), slot_(FeedbackVectorICSlot::Invalid()) {
+ HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name,
+ InlineCacheState initialization_state)
+ : name_(name),
+ slot_(FeedbackVectorICSlot::Invalid()),
+ initialization_state_(initialization_state) {
SetOperandAt(0, context);
SetOperandAt(1, object);
set_representation(Representation::Tagged());
@@ -6548,6 +6480,7 @@ class HLoadNamedGeneric FINAL : public HTemplateInstruction<2> {
Handle<Object> name_;
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorICSlot slot_;
+ InlineCacheState initialization_state_;
};
@@ -6787,11 +6720,14 @@ class HLoadKeyed FINAL
class HLoadKeyedGeneric FINAL : public HTemplateInstruction<3> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadKeyedGeneric, HValue*,
- HValue*);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadKeyedGeneric, HValue*,
+ HValue*, InlineCacheState);
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
+ InlineCacheState initialization_state() const {
+ return initialization_state_;
+ }
FeedbackVectorICSlot slot() const { return slot_; }
Handle<TypeFeedbackVector> feedback_vector() const {
return feedback_vector_;
@@ -6816,8 +6752,10 @@ class HLoadKeyedGeneric FINAL : public HTemplateInstruction<3> {
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
private:
- HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key)
- : slot_(FeedbackVectorICSlot::Invalid()) {
+ HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
+ InlineCacheState initialization_state)
+ : slot_(FeedbackVectorICSlot::Invalid()),
+ initialization_state_(initialization_state) {
set_representation(Representation::Tagged());
SetOperandAt(0, obj);
SetOperandAt(1, key);
@@ -6827,6 +6765,7 @@ class HLoadKeyedGeneric FINAL : public HTemplateInstruction<3> {
Handle<TypeFeedbackVector> feedback_vector_;
FeedbackVectorICSlot slot_;
+ InlineCacheState initialization_state_;
};
@@ -6915,14 +6854,6 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
SetChangesFlag(kMaps);
}
- void MarkReceiverAsCell() {
- bit_field_ = ReceiverIsCellField::update(bit_field_, true);
- }
-
- bool receiver_is_cell() const {
- return ReceiverIsCellField::decode(bit_field_);
- }
-
bool NeedsWriteBarrier() const {
DCHECK(!field_representation().IsDouble() ||
(FLAG_unbox_double_fields && access_.IsInobject()) ||
@@ -6931,7 +6862,6 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
if (field_representation().IsSmi()) return false;
if (field_representation().IsInteger32()) return false;
if (field_representation().IsExternal()) return false;
- if (receiver_is_cell()) return false;
return StoringValueNeedsWriteBarrier(value()) &&
ReceiverObjectNeedsWriteBarrier(object(), value(), dominator());
}
@@ -6991,7 +6921,6 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
class HasTransitionField : public BitField<bool, 0, 1> {};
class StoreModeField : public BitField<StoreFieldOrKeyedMode, 1, 1> {};
- class ReceiverIsCellField : public BitField<bool, 2, 1> {};
HObjectAccess access_;
HValue* dominator_;
@@ -7001,14 +6930,17 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreNamedGeneric, HValue*,
Handle<String>, HValue*,
- LanguageMode);
+ LanguageMode, InlineCacheState);
HValue* object() const { return OperandAt(0); }
HValue* value() const { return OperandAt(1); }
HValue* context() const { return OperandAt(2); }
Handle<String> name() const { return name_; }
LanguageMode language_mode() const { return language_mode_; }
+ InlineCacheState initialization_state() const {
+ return initialization_state_;
+ }
std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE; // NOLINT
@@ -7020,8 +6952,11 @@ class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
private:
HStoreNamedGeneric(HValue* context, HValue* object, Handle<String> name,
- HValue* value, LanguageMode language_mode)
- : name_(name), language_mode_(language_mode) {
+ HValue* value, LanguageMode language_mode,
+ InlineCacheState initialization_state)
+ : name_(name),
+ language_mode_(language_mode),
+ initialization_state_(initialization_state) {
SetOperandAt(0, object);
SetOperandAt(1, value);
SetOperandAt(2, context);
@@ -7030,6 +6965,7 @@ class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
Handle<String> name_;
LanguageMode language_mode_;
+ InlineCacheState initialization_state_;
};
@@ -7219,14 +7155,18 @@ class HStoreKeyed FINAL
class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
public:
- DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
- HValue*, HValue*, LanguageMode);
+ DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreKeyedGeneric, HValue*,
+ HValue*, HValue*, LanguageMode,
+ InlineCacheState);
HValue* object() const { return OperandAt(0); }
HValue* key() const { return OperandAt(1); }
HValue* value() const { return OperandAt(2); }
HValue* context() const { return OperandAt(3); }
LanguageMode language_mode() const { return language_mode_; }
+ InlineCacheState initialization_state() const {
+ return initialization_state_;
+ }
Representation RequiredInputRepresentation(int index) OVERRIDE {
// tagged[tagged] = tagged
@@ -7239,8 +7179,10 @@ class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
private:
HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
- HValue* value, LanguageMode language_mode)
- : language_mode_(language_mode) {
+ HValue* value, LanguageMode language_mode,
+ InlineCacheState initialization_state)
+ : language_mode_(language_mode),
+ initialization_state_(initialization_state) {
SetOperandAt(0, object);
SetOperandAt(1, key);
SetOperandAt(2, value);
@@ -7249,6 +7191,7 @@ class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
}
LanguageMode language_mode_;
+ InlineCacheState initialization_state_;
};
@@ -7562,11 +7505,11 @@ class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
bool IsDeletable() const OVERRIDE { return true; }
- class FunctionKindField : public BitField<FunctionKind, 0, 6> {};
- class PretenureField : public BitField<bool, 6, 1> {};
- class HasNoLiteralsField : public BitField<bool, 7, 1> {};
+ class FunctionKindField : public BitField<FunctionKind, 0, 8> {};
+ class PretenureField : public BitField<bool, 8, 1> {};
+ class HasNoLiteralsField : public BitField<bool, 9, 1> {};
STATIC_ASSERT(LANGUAGE_END == 3);
- class LanguageModeField : public BitField<LanguageMode, 8, 2> {};
+ class LanguageModeField : public BitField<LanguageMode, 10, 2> {};
Handle<SharedFunctionInfo> shared_info_;
uint32_t bit_field_;
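The HFunctionLiteral change widens FunctionKindField from 6 to 8 bits and shifts every field packed after it; in a BitField-style encoding each field owns a fixed [shift, shift + size) slice of one integer, so growing one field forces renumbering its successors. A compact sketch of that encoding with a simplified BitField (not V8's template):

    #include <cstdint>

    // Minimal BitField: a field of `size` bits starting at bit `shift`.
    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & kMask;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    // After the patch: kind gets bits [0,8), then two 1-bit flags, then 2 bits.
    using FunctionKindField = BitField<int, 0, 8>;
    using PretenureField = BitField<bool, 8, 1>;
    using HasNoLiteralsField = BitField<bool, 9, 1>;
    using LanguageModeField = BitField<int, 10, 2>;

    uint32_t Pack(int kind, bool pretenure, bool no_literals, int mode) {
      return FunctionKindField::encode(kind) |
             PretenureField::encode(pretenure) |
             HasNoLiteralsField::encode(no_literals) |
             LanguageModeField::encode(mode);
    }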
diff --git a/deps/v8/src/hydrogen-representation-changes.cc b/deps/v8/src/hydrogen-representation-changes.cc
index bfc8271a9b..33adf5aa9a 100644
--- a/deps/v8/src/hydrogen-representation-changes.cc
+++ b/deps/v8/src/hydrogen-representation-changes.cc
@@ -29,7 +29,7 @@ void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
// Try to create a new copy of the constant with the new representation.
if (is_truncating_to_int && to.IsInteger32()) {
Maybe<HConstant*> res = constant->CopyToTruncatedInt32(graph()->zone());
- if (res.has_value) new_value = res.value;
+ if (res.IsJust()) new_value = res.FromJust();
} else {
new_value = constant->CopyToRepresentation(to, graph()->zone());
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 8551b10230..ae717e4ad9 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -1751,8 +1751,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
details_index->ClearFlag(HValue::kCanOverflow);
HValue* details =
Add<HLoadKeyed>(elements, details_index, nullptr, FAST_ELEMENTS);
- int details_mask = PropertyDetails::TypeField::kMask |
- PropertyDetails::DeletedField::kMask;
+ int details_mask = PropertyDetails::TypeField::kMask;
details = AddUncasted<HBitwise>(Token::BIT_AND, details,
Add<HConstant>(details_mask));
IfBuilder details_compare(this);
@@ -2320,10 +2319,8 @@ HValue* HGraphBuilder::BuildUncheckedStringAdd(
{
// Fallback to the runtime to add the two strings.
Add<HPushArguments>(left, right);
- Push(Add<HCallRuntime>(
- isolate()->factory()->empty_string(),
- Runtime::FunctionForId(Runtime::kStringAdd),
- 2));
+ Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kStringAddRT), 2));
}
if_sameencodingandsequential.End();
}
@@ -3350,7 +3347,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
// to know it's the initial state.
function_state_ = &initial_function_state_;
InitializeAstVisitor(info->isolate(), info->zone());
- if (FLAG_hydrogen_track_positions) {
+ if (top_info()->is_tracking_positions()) {
SetSourcePosition(info->shared_info()->start_position());
}
}
@@ -3443,6 +3440,7 @@ HGraph::HGraph(CompilationInfo* info)
info_(info),
zone_(info->zone()),
is_recursive_(false),
+ this_has_uses_(false),
use_optimistic_licm_(false),
depends_on_empty_array_proto_elements_(false),
type_change_checksum_(0),
@@ -3455,7 +3453,10 @@ HGraph::HGraph(CompilationInfo* info)
start_environment_ = new (zone_)
HEnvironment(zone_, descriptor.GetEnvironmentParameterCount());
} else {
- info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown());
+ if (info->is_tracking_positions()) {
+ info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown(),
+ InlinedFunctionInfo::kNoParentId);
+ }
start_environment_ =
new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
}
@@ -3484,13 +3485,9 @@ void HGraph::FinalizeUniqueness() {
int HGraph::SourcePositionToScriptPosition(SourcePosition pos) {
- if (!FLAG_hydrogen_track_positions || pos.IsUnknown()) {
- return pos.raw();
- }
-
- const int id = info()->inlining_id_to_function_id()->at(pos.inlining_id());
- return info()->inlined_function_infos()->at(id).start_position() +
- pos.position();
+ return (FLAG_hydrogen_track_positions && !pos.IsUnknown())
+ ? info()->start_position_for(pos.inlining_id()) + pos.position()
+ : pos.raw();
}
@@ -3914,7 +3911,7 @@ FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
// Push on the state stack.
owner->set_function_state(this);
- if (FLAG_hydrogen_track_positions) {
+ if (compilation_info_->is_tracking_positions()) {
outer_source_position_ = owner->source_position();
owner->EnterInlinedSource(
info->shared_info()->start_position(),
@@ -3928,7 +3925,7 @@ FunctionState::~FunctionState() {
delete test_context_;
owner_->set_function_state(outer_);
- if (FLAG_hydrogen_track_positions) {
+ if (compilation_info_->is_tracking_positions()) {
owner_->set_source_position(outer_source_position_);
owner_->EnterInlinedSource(
outer_->compilation_info()->shared_info()->start_position(),
@@ -5267,6 +5264,7 @@ HOptimizedGraphBuilder::LookupGlobalProperty(Variable* var, LookupIterator* it,
case LookupIterator::ACCESSOR:
case LookupIterator::ACCESS_CHECK:
case LookupIterator::INTERCEPTOR:
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::NOT_FOUND:
return kUseGeneric;
case LookupIterator::DATA:
@@ -5296,7 +5294,7 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (expr->is_this()) {
- current_info()->set_this_has_uses(true);
+ graph()->MarkThisHasUses();
}
DCHECK(!HasStackOverflow());
@@ -5320,7 +5318,8 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Handle<GlobalObject> global(current_info()->global_object());
- if (FLAG_harmony_scoping) {
+ // Lookup in script contexts.
+ {
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
ScriptContextTable::LookupResult lookup;
@@ -5329,7 +5328,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
Handle<Context> script_context = ScriptContextTable::GetContext(
script_contexts, lookup.context_index);
Handle<Object> current_value =
- FixedArray::get(script_context, lookup.context_index);
+ FixedArray::get(script_context, lookup.slot_index);
// If the value is not the hole, it will stay initialized,
// so no need to generate a check.
@@ -5349,9 +5348,9 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
if (type == kUseCell) {
Handle<PropertyCell> cell = it.GetPropertyCell();
- if (cell->type()->IsConstant()) {
- PropertyCell::AddDependentCompilationInfo(cell, top_info());
- Handle<Object> constant_object = cell->type()->AsConstant()->Value();
+ PropertyCell::AddDependentCompilationInfo(cell, top_info());
+ if (it.property_details().cell_type() == PropertyCellType::kConstant) {
+ Handle<Object> constant_object(cell->value(), isolate());
if (constant_object->IsConsString()) {
constant_object =
String::Flatten(Handle<String>::cast(constant_object));
@@ -5359,8 +5358,11 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
HConstant* constant = New<HConstant>(constant_object);
return ast_context()->ReturnInstruction(constant, expr->id());
} else {
- HLoadGlobalCell* instr =
- New<HLoadGlobalCell>(cell, it.property_details());
+ HConstant* cell_constant = Add<HConstant>(cell);
+ HLoadNamedField* instr = New<HLoadNamedField>(
+ cell_constant, nullptr, HObjectAccess::ForPropertyCellValue());
+ instr->ClearDependsOnFlag(kInobjectFields);
+ instr->SetDependsOnFlag(kGlobalVars);
return ast_context()->ReturnInstruction(instr, expr->id());
}
} else {
@@ -5445,9 +5447,10 @@ void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
static bool CanInlinePropertyAccess(Handle<Map> map) {
if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
if (map->instance_type() < FIRST_NONSTRING_TYPE) return true;
- return map->IsJSObjectMap() &&
- !map->is_dictionary_map() &&
- !map->has_named_interceptor();
+ return map->IsJSObjectMap() && !map->is_dictionary_map() &&
+ !map->has_named_interceptor() &&
+ // TODO(verwaest): Whitelist contexts to which we have access.
+ !map->is_access_check_needed();
}
@@ -5963,7 +5966,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
if (!map_->IsJSObjectMap()) return true;
- lookup_.LookupDescriptor(*map_, *name_);
+ LookupDescriptor(*map_, *name_);
return LoadResult(map_);
}
@@ -5979,7 +5982,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
access_ = HObjectAccess::ForField(map, index, representation(), name_);
// Load field map for heap objects.
- LoadFieldMaps(map);
+ return LoadFieldMaps(map);
} else if (IsAccessorConstant()) {
Handle<Object> accessors = GetAccessorsFromMap(map);
if (!accessors->IsAccessorPair()) return false;
@@ -6005,7 +6008,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
}
-void HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
Handle<Map> map) {
// Clear any previously collected field maps/type.
field_maps_.Clear();
@@ -6016,19 +6019,26 @@ void HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
// Collect the (stable) maps from the field type.
int num_field_maps = field_type->NumClasses();
- if (num_field_maps == 0) return;
- DCHECK(access_.representation().IsHeapObject());
- field_maps_.Reserve(num_field_maps, zone());
- HeapType::Iterator<Map> it = field_type->Classes();
- while (!it.Done()) {
- Handle<Map> field_map = it.Current();
- if (!field_map->is_stable()) {
- field_maps_.Clear();
- return;
+ if (num_field_maps > 0) {
+ DCHECK(access_.representation().IsHeapObject());
+ field_maps_.Reserve(num_field_maps, zone());
+ HeapType::Iterator<Map> it = field_type->Classes();
+ while (!it.Done()) {
+ Handle<Map> field_map = it.Current();
+ if (!field_map->is_stable()) {
+ field_maps_.Clear();
+ break;
+ }
+ field_maps_.Add(field_map, zone());
+ it.Advance();
}
- field_maps_.Add(field_map, zone());
- it.Advance();
}
+
+ if (field_maps_.is_empty()) {
+ // Store is not safe if the field map was cleared.
+ return IsLoad() || !field_type->Is(HeapType::None());
+ }
+
field_maps_.Sort();
DCHECK_EQ(num_field_maps, field_maps_.length());
@@ -6039,6 +6049,7 @@ void HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
// Add dependency on the map that introduced the field.
Map::AddDependentCompilationInfo(GetFieldOwnerFromMap(map),
DependentCode::kFieldTypeGroup, top_info());
+ return true;
}
@@ -6052,17 +6063,23 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
}
map = Handle<Map>(holder_->map());
if (!CanInlinePropertyAccess(map)) {
- lookup_.NotFound();
+ NotFound();
return false;
}
- lookup_.LookupDescriptor(*map, *name_);
+ LookupDescriptor(*map, *name_);
if (IsFound()) return LoadResult(map);
}
- lookup_.NotFound();
+ NotFound();
return true;
}
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsIntegerIndexedExotic() {
+ InstanceType instance_type = map_->instance_type();
+ return instance_type == JS_TYPED_ARRAY_TYPE && IsNonArrayIndexInteger(*name_);
+}
+
+
bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
if (!CanInlinePropertyAccess(map_)) return false;
if (IsJSObjectFieldAccessor()) return IsLoad();
@@ -6072,12 +6089,13 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
}
if (!LookupDescriptor()) return false;
if (IsFound()) return IsLoad() || !IsReadOnly();
+ if (IsIntegerIndexedExotic()) return false;
if (!LookupInPrototypes()) return false;
if (IsLoad()) return true;
if (IsAccessorConstant()) return true;
- lookup_.LookupTransition(*map_, *name_, NONE);
- if (lookup_.IsTransitionToData() && map_->unused_property_fields() > 0) {
+ LookupTransition(*map_, *name_, NONE);
+ if (IsTransitionToData() && map_->unused_property_fields() > 0) {
// Construct the object field access.
int descriptor = transition()->LastAdded();
int index =
@@ -6089,8 +6107,7 @@ bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
access_ = HObjectAccess::ForField(map_, index, representation, name_);
// Load field map for heap objects.
- LoadFieldMaps(transition());
- return true;
+ return LoadFieldMaps(transition());
}
return false;
}
@@ -6464,7 +6481,8 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
BailoutId ast_id) {
Handle<GlobalObject> global(current_info()->global_object());
- if (FLAG_harmony_scoping) {
+ // Lookup in script contexts.
+ {
Handle<ScriptContextTable> script_contexts(
global->native_context()->script_context_table());
ScriptContextTable::LookupResult lookup;
@@ -6474,6 +6492,16 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
}
Handle<Context> script_context =
ScriptContextTable::GetContext(script_contexts, lookup.context_index);
+
+ Handle<Object> current_value =
+ FixedArray::get(script_context, lookup.slot_index);
+
+ // If the value is not the hole, it will stay initialized,
+ // so no need to generate a check.
+ if (*current_value == *isolate()->factory()->the_hole_value()) {
+ return Bailout(kReferenceToUninitializedVariable);
+ }
+
HStoreNamedField* instr = Add<HStoreNamedField>(
Add<HConstant>(script_context),
HObjectAccess::ForContextSlot(lookup.slot_index), value);
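The added store-side bailout mirrors the load path earlier in this file: a script-context slot that still holds the hole marker is an uninitialized let/const binding, and optimized code bails out rather than emitting a per-access hole check. The shape of that guard, sketched against a plain array standing in for the context (hypothetical names):

    #include <stdexcept>

    struct Object {};
    static Object the_hole;  // stand-in for the factory's the_hole_value()

    // Bail out of optimized compilation when the slot may be uninitialized.
    Object* CheckInitializedSlot(Object** context_slots, int slot_index) {
      Object* current_value = context_slots[slot_index];
      if (current_value == &the_hole) {
        throw std::runtime_error("Bailout: reference to uninitialized variable");
      }
      // Once a slot is initialized it never becomes the hole again,
      // so no check is needed in the generated code itself.
      return current_value;
    }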
@@ -6488,8 +6516,9 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
GlobalPropertyAccess type = LookupGlobalProperty(var, &it, STORE);
if (type == kUseCell) {
Handle<PropertyCell> cell = it.GetPropertyCell();
- if (cell->type()->IsConstant()) {
- Handle<Object> constant = cell->type()->AsConstant()->Value();
+ PropertyCell::AddDependentCompilationInfo(cell, top_info());
+ if (it.property_details().cell_type() == PropertyCellType::kConstant) {
+ Handle<Object> constant(cell->value(), isolate());
if (value->IsConstant()) {
HConstant* c_value = HConstant::cast(value);
if (!constant.is_identical_to(c_value->handle(isolate()))) {
@@ -6511,8 +6540,11 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
builder.End();
}
}
- HInstruction* instr =
- Add<HStoreGlobalCell>(value, cell, it.property_details());
+ HConstant* cell_constant = Add<HConstant>(cell);
+ HInstruction* instr = Add<HStoreNamedField>(
+ cell_constant, HObjectAccess::ForPropertyCellValue(), value);
+ instr->ClearChangesFlag(kInobjectFields);
+ instr->SetChangesFlag(kGlobalVars);
if (instr->HasObservableSideEffects()) {
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
}
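Global stores are now emitted as an HStoreNamedField into the cell's value slot, with the GVN flags retargeted from kInobjectFields to kGlobalVars so alias analysis still classifies them as global-variable effects. Conceptually the generated access is a one-level indirection through the cell; a hedged C++ analogue:

    // A global variable is reached through its PropertyCell, so optimized
    // code stores into cell->value instead of a direct global slot.
    struct PropertyCell {
      void* value;
    };

    void StoreGlobal(PropertyCell* cell, void* new_value) {
      cell->value = new_value;  // what HStoreNamedField of the cell value emits
    }

    void* LoadGlobal(PropertyCell* cell) {
      return cell->value;       // what HLoadNamedField of the cell value emits
    }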
@@ -6520,8 +6552,9 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
HValue* global_object = Add<HLoadNamedField>(
context(), nullptr,
HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
- HStoreNamedGeneric* instr = Add<HStoreNamedGeneric>(
- global_object, var->name(), value, function_language_mode());
+ HStoreNamedGeneric* instr =
+ Add<HStoreNamedGeneric>(global_object, var->name(), value,
+ function_language_mode(), PREMONOMORPHIC);
USE(instr);
DCHECK(instr->HasObservableSideEffects());
Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -6778,7 +6811,7 @@ void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
CHECK_ALIVE(VisitForValue(expr->exception()));
HValue* value = environment()->Pop();
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
Add<HPushArguments>(value);
Add<HCallRuntime>(isolate()->factory()->empty_string(),
Runtime::FunctionForId(Runtime::kThrow), 1);
@@ -6819,19 +6852,16 @@ HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
- PropertyAccessType access_type,
- Expression* expr,
- HValue* object,
- Handle<String> name,
- HValue* value,
- bool is_uninitialized) {
+ PropertyAccessType access_type, Expression* expr, HValue* object,
+ Handle<String> name, HValue* value, bool is_uninitialized) {
if (is_uninitialized) {
Add<HDeoptimize>(
Deoptimizer::kInsufficientTypeFeedbackForGenericNamedAccess,
Deoptimizer::SOFT);
}
if (access_type == LOAD) {
- HLoadNamedGeneric* result = New<HLoadNamedGeneric>(object, name);
+ HLoadNamedGeneric* result =
+ New<HLoadNamedGeneric>(object, name, PREMONOMORPHIC);
if (FLAG_vector_ics) {
Handle<SharedFunctionInfo> current_shared =
function_state()->compilation_info()->shared_info();
@@ -6843,7 +6873,7 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
return result;
} else {
return New<HStoreNamedGeneric>(object, name, value,
- function_language_mode());
+ function_language_mode(), PREMONOMORPHIC);
}
}
@@ -6856,7 +6886,8 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
HValue* key,
HValue* value) {
if (access_type == LOAD) {
- HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(object, key);
+ HLoadKeyedGeneric* result =
+ New<HLoadKeyedGeneric>(object, key, PREMONOMORPHIC);
if (FLAG_vector_ics) {
Handle<SharedFunctionInfo> current_shared =
function_state()->compilation_info()->shared_info();
@@ -6867,8 +6898,8 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
}
return result;
} else {
- return New<HStoreKeyedGeneric>(object, key, value,
- function_language_mode());
+ return New<HStoreKeyedGeneric>(object, key, value, function_language_mode(),
+ PREMONOMORPHIC);
}
}
@@ -6926,7 +6957,7 @@ HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
static bool CanInlineElementAccess(Handle<Map> map) {
return map->IsJSObjectMap() && !map->has_slow_elements_kind() &&
- !map->has_indexed_interceptor();
+ !map->has_indexed_interceptor() && !map->is_access_check_needed();
}
@@ -7785,8 +7816,11 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
int arguments_count,
HValue* implicit_return_value,
BailoutId ast_id, BailoutId return_id,
- InliningKind inlining_kind,
- SourcePosition position) {
+ InliningKind inlining_kind) {
+ if (target->context()->native_context() !=
+ top_info()->closure()->context()->native_context()) {
+ return false;
+ }
int nodes_added = InliningAstSize(target);
if (nodes_added == kNotInlinable) return false;
@@ -7830,12 +7864,16 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
}
// Parse and allocate variables.
- CompilationInfo target_info(target, zone());
// Use the same AstValueFactory for creating strings in the sub-compilation
// step, but don't transfer ownership to target_info.
- target_info.SetAstValueFactory(top_info()->ast_value_factory(), false);
+ ParseInfo parse_info(zone(), target);
+ parse_info.set_ast_value_factory(
+ top_info()->parse_info()->ast_value_factory());
+ parse_info.set_ast_value_factory_owned(false);
+
+ CompilationInfo target_info(&parse_info);
Handle<SharedFunctionInfo> target_shared(target->shared());
- if (!Compiler::ParseAndAnalyze(&target_info)) {
+ if (!Compiler::ParseAndAnalyze(target_info.parse_info())) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
@@ -7898,13 +7936,17 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
DCHECK(target_shared->has_deoptimization_support());
AstTyper::Run(&target_info);
- int function_id = top_info()->TraceInlinedFunction(target_shared, position);
+ int inlining_id = 0;
+ if (top_info()->is_tracking_positions()) {
+ inlining_id = top_info()->TraceInlinedFunction(
+ target_shared, source_position(), function_state()->inlining_id());
+ }
// Save the pending call context. Set up new one for the inlined function.
// The function state is new-allocated because we need to delete it
// in two different places.
- FunctionState* target_state = new FunctionState(
- this, &target_info, inlining_kind, function_id);
+ FunctionState* target_state =
+ new FunctionState(this, &target_info, inlining_kind, inlining_id);
HConstant* undefined = graph()->GetConstantUndefined();
@@ -7947,6 +7989,9 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
Add<HEnterInlined>(return_id, target, context, arguments_count, function,
function_state()->inlining_kind(),
function->scope()->arguments(), arguments_object);
+ if (top_info()->is_tracking_positions()) {
+ enter_inlined->set_inlining_id(inlining_id);
+ }
function_state()->set_entry(enter_inlined);
VisitDeclarations(target_info.scope()->declarations());
@@ -8054,25 +8099,16 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
- return TryInline(expr->target(),
- expr->arguments()->length(),
- NULL,
- expr->id(),
- expr->ReturnId(),
- NORMAL_RETURN,
- ScriptPositionToSourcePosition(expr->position()));
+ return TryInline(expr->target(), expr->arguments()->length(), NULL,
+ expr->id(), expr->ReturnId(), NORMAL_RETURN);
}
bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
HValue* implicit_return_value) {
- return TryInline(expr->target(),
- expr->arguments()->length(),
- implicit_return_value,
- expr->id(),
- expr->ReturnId(),
- CONSTRUCT_CALL_RETURN,
- ScriptPositionToSourcePosition(expr->position()));
+ return TryInline(expr->target(), expr->arguments()->length(),
+ implicit_return_value, expr->id(), expr->ReturnId(),
+ CONSTRUCT_CALL_RETURN);
}
@@ -8081,13 +8117,7 @@ bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
BailoutId ast_id,
BailoutId return_id) {
if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
- return TryInline(getter,
- 0,
- NULL,
- ast_id,
- return_id,
- GETTER_CALL_RETURN,
- source_position());
+ return TryInline(getter, 0, NULL, ast_id, return_id, GETTER_CALL_RETURN);
}
@@ -8097,25 +8127,16 @@ bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
BailoutId assignment_id,
HValue* implicit_return_value) {
if (TryInlineApiSetter(setter, receiver_map, id)) return true;
- return TryInline(setter,
- 1,
- implicit_return_value,
- id, assignment_id,
- SETTER_CALL_RETURN,
- source_position());
+ return TryInline(setter, 1, implicit_return_value, id, assignment_id,
+ SETTER_CALL_RETURN);
}
bool HOptimizedGraphBuilder::TryInlineIndirectCall(Handle<JSFunction> function,
Call* expr,
int arguments_count) {
- return TryInline(function,
- arguments_count,
- NULL,
- expr->id(),
- expr->ReturnId(),
- NORMAL_RETURN,
- ScriptPositionToSourcePosition(expr->position()));
+ return TryInline(function, arguments_count, NULL, expr->id(),
+ expr->ReturnId(), NORMAL_RETURN);
}
@@ -8164,11 +8185,12 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
bool HOptimizedGraphBuilder::IsReadOnlyLengthDescriptor(
Handle<Map> jsarray_map) {
DCHECK(!jsarray_map->is_dictionary_map());
- LookupResult lookup;
Isolate* isolate = jsarray_map->GetIsolate();
Handle<Name> length_string = isolate->factory()->length_string();
- lookup.LookupDescriptor(*jsarray_map, *length_string);
- return lookup.IsReadOnly();
+ DescriptorArray* descriptors = jsarray_map->instance_descriptors();
+ int number = descriptors->SearchWithCache(*length_string, *jsarray_map);
+ DCHECK_NE(DescriptorArray::kNotFound, number);
+ return descriptors->GetDetails(number).IsReadOnly();
}
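
The hunk above drops the old LookupResult helper in favor of searching the map's instance descriptors directly. A rough standalone restatement of that lookup shape, using simplified stand-in types rather than the real Map/DescriptorArray:

#include <string>
#include <utility>
#include <vector>

// Simplified stand-ins for DescriptorArray/PropertyDetails, only to show the
// shape of the inline lookup; the real types live in V8's objects code.
struct PropertyDetails {
  bool read_only;
  bool IsReadOnly() const { return read_only; }
};

struct DescriptorArray {
  static const int kNotFound = -1;
  std::vector<std::pair<std::string, PropertyDetails>> entries;
  int Search(const std::string& name) const {
    for (size_t i = 0; i < entries.size(); ++i)
      if (entries[i].first == name) return static_cast<int>(i);
    return kNotFound;
  }
  PropertyDetails GetDetails(int number) const {
    return entries[number].second;
  }
};

// Mirrors the patched IsReadOnlyLengthDescriptor: search the descriptors for
// "length" and read the read-only bit off the resulting details.
bool IsReadOnlyLength(const DescriptorArray& descriptors) {
  int number = descriptors.Search("length");
  if (number == DescriptorArray::kNotFound) return false;  // real code DCHECKs
  return descriptors.GetDetails(number).IsReadOnly();
}
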
@@ -8625,9 +8647,18 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
int argc,
BailoutId ast_id,
ApiCallType call_type) {
+ if (function->context()->native_context() !=
+ top_info()->closure()->context()->native_context()) {
+ return false;
+ }
CallOptimization optimization(function);
if (!optimization.is_simple_api_call()) return false;
Handle<Map> holder_map;
+ for (int i = 0; i < receiver_maps->length(); ++i) {
+ auto map = receiver_maps->at(i);
+ // Don't inline calls to receivers requiring access checks.
+ if (map->is_access_check_needed()) return false;
+ }
if (call_type == kCallApiFunction) {
// Cannot embed a direct reference to the global proxy map
// as it may be dropped on deserialization.
@@ -9079,10 +9110,10 @@ bool HOptimizedGraphBuilder::TryHandleArrayCallNew(CallNew* expr,
return false;
}
- BuildArrayCall(expr,
- expr->arguments()->length(),
- function,
- expr->allocation_site());
+ Handle<AllocationSite> site = expr->allocation_site();
+ if (site.is_null()) return false;
+
+ BuildArrayCall(expr, expr->arguments()->length(), function, site);
return true;
}
@@ -9132,8 +9163,6 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(PushLoad(prop, receiver, key));
HValue* function = Pop();
- if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
-
if (function->IsConstant() &&
HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
// Push the function under the receiver.
@@ -9206,58 +9235,21 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
// evaluation of the arguments.
CHECK_ALIVE(VisitForValue(expr->expression()));
HValue* function = Top();
- if (expr->global_call()) {
- Variable* var = proxy->var();
- bool known_global_function = false;
- // If there is a global property cell for the name at compile time and
- // access check is not enabled we assume that the function will not change
- // and generate optimized code for calling the function.
- Handle<GlobalObject> global(current_info()->global_object());
- LookupIterator it(global, var->name(),
- LookupIterator::OWN_SKIP_INTERCEPTOR);
- GlobalPropertyAccess type = LookupGlobalProperty(var, &it, LOAD);
- if (type == kUseCell) {
- known_global_function = expr->ComputeGlobalTarget(global, &it);
- }
- if (known_global_function) {
- Add<HCheckValue>(function, expr->target());
-
- // Placeholder for the receiver.
- Push(graph()->GetConstantUndefined());
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
- // Patch the global object on the stack by the expected receiver.
- HValue* receiver = ImplicitReceiverFor(function, expr->target());
- const int receiver_index = argument_count - 1;
- environment()->SetExpressionStackAt(receiver_index, receiver);
-
- if (TryInlineBuiltinFunctionCall(expr)) {
- if (FLAG_trace_inlining) {
- PrintF("Inlining builtin ");
- expr->target()->ShortPrint();
- PrintF("\n");
- }
- return;
- }
- if (TryInlineApiFunctionCall(expr, receiver)) return;
- if (TryHandleArrayCall(expr, function)) return;
- if (TryInlineCall(expr)) return;
+ if (function->IsConstant() &&
+ HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+ Handle<Object> constant = HConstant::cast(function)->handle(isolate());
+ Handle<JSFunction> target = Handle<JSFunction>::cast(constant);
+ expr->SetKnownGlobalTarget(target);
+ }
- PushArgumentsFromEnvironment(argument_count);
- call = BuildCallConstantFunction(expr->target(), argument_count);
- } else {
- Push(graph()->GetConstantUndefined());
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
- PushArgumentsFromEnvironment(argument_count);
- call = New<HCallFunction>(function, argument_count);
- }
+ // Placeholder for the receiver.
+ Push(graph()->GetConstantUndefined());
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
- } else if (expr->IsMonomorphic()) {
+ if (expr->IsMonomorphic()) {
Add<HCheckValue>(function, expr->target());
- Push(graph()->GetConstantUndefined());
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
-
+ // Patch the global object on the stack with the expected receiver.
HValue* receiver = ImplicitReceiverFor(function, expr->target());
const int receiver_index = argument_count - 1;
environment()->SetExpressionStackAt(receiver_index, receiver);
@@ -9271,15 +9263,12 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
return;
}
if (TryInlineApiFunctionCall(expr, receiver)) return;
-
+ if (TryHandleArrayCall(expr, function)) return;
if (TryInlineCall(expr)) return;
- call = PreProcessCall(New<HInvokeFunction>(
- function, expr->target(), argument_count));
-
+ PushArgumentsFromEnvironment(argument_count);
+ call = BuildCallConstantFunction(expr->target(), argument_count);
} else {
- Push(graph()->GetConstantUndefined());
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
PushArgumentsFromEnvironment(argument_count);
HCallFunction* call_function =
New<HCallFunction>(function, argument_count);
@@ -9408,7 +9397,7 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
int argument_count = expr->arguments()->length() + 1; // Plus constructor.
Factory* factory = isolate()->factory();
@@ -9418,6 +9407,12 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
HValue* function = Top();
CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ if (function->IsConstant() &&
+ HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+ Handle<Object> constant = HConstant::cast(function)->handle(isolate());
+ expr->SetKnownGlobalTarget(Handle<JSFunction>::cast(constant));
+ }
+
if (FLAG_inline_construct &&
expr->IsMonomorphic() &&
IsAllocationInlineable(expr->target())) {
@@ -9521,22 +9516,6 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
}
-// Support for generating inlined runtime functions.
-
-// Lookup table for generators for runtime calls that are generated inline.
-// Elements of the table are member pointers to functions of
-// HOptimizedGraphBuilder.
-#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
- &HOptimizedGraphBuilder::Generate##Name,
-
-const HOptimizedGraphBuilder::InlineFunctionGenerator
- HOptimizedGraphBuilder::kInlineFunctionGenerators[] = {
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
- INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
-};
-#undef INLINE_FUNCTION_GENERATOR_ADDRESS
-
-
template <class ViewClass>
void HGraphBuilder::BuildArrayBufferViewInitialization(
HValue* obj,
@@ -9915,30 +9894,21 @@ void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
const Runtime::Function* function = expr->function();
DCHECK(function != NULL);
-
- if (function->intrinsic_type == Runtime::INLINE ||
- function->intrinsic_type == Runtime::INLINE_OPTIMIZED) {
- DCHECK(expr->name()->length() > 0);
- DCHECK(expr->name()->Get(0) == '_');
- // Call to an inline function.
- int lookup_index = static_cast<int>(function->function_id) -
- static_cast<int>(Runtime::kFirstInlineFunction);
- DCHECK(lookup_index >= 0);
- DCHECK(static_cast<size_t>(lookup_index) <
- arraysize(kInlineFunctionGenerators));
- InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
-
- // Call the inline code generator using the pointer-to-member.
- (this->*generator)(expr);
- } else {
- DCHECK(function->intrinsic_type == Runtime::RUNTIME);
- Handle<String> name = expr->name();
- int argument_count = expr->arguments()->length();
- CHECK_ALIVE(VisitExpressions(expr->arguments()));
- PushArgumentsFromEnvironment(argument_count);
- HCallRuntime* call = New<HCallRuntime>(name, function,
- argument_count);
- return ast_context()->ReturnInstruction(call, expr->id());
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: \
+ return Generate##Name(expr);
+
+ FOR_EACH_HYDROGEN_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Handle<String> name = expr->name();
+ int argument_count = expr->arguments()->length();
+ CHECK_ALIVE(VisitExpressions(expr->arguments()));
+ PushArgumentsFromEnvironment(argument_count);
+ HCallRuntime* call = New<HCallRuntime>(name, function, argument_count);
+ return ast_context()->ReturnInstruction(call, expr->id());
+ }
}
}
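
The rewritten VisitCallRuntime dispatches intrinsics through an X-macro expanded into switch cases (the FOR_EACH_HYDROGEN_INTRINSIC list itself appears in the hydrogen.h hunk further down). A minimal self-contained sketch of the same pattern, using a made-up three-entry list:

#include <iostream>

// Hypothetical intrinsic list; the real FOR_EACH_HYDROGEN_INTRINSIC
// enumerates far more entries.
#define FOR_EACH_DEMO_INTRINSIC(F) \
  F(MathFloor)                     \
  F(MathSqrt)                      \
  F(StringGetLength)

enum class IntrinsicId {
#define DECLARE_ID(Name) k##Name,
  FOR_EACH_DEMO_INTRINSIC(DECLARE_ID)
#undef DECLARE_ID
  kUnknown
};

class DemoBuilder {
 public:
  // One Generate##Name member per list entry, declared by the same macro
  // that builds the dispatch below.
#define GENERATOR_DECLARATION(Name) \
  void Generate##Name() { std::cout << "inlined " #Name "\n"; }
  FOR_EACH_DEMO_INTRINSIC(GENERATOR_DECLARATION)
#undef GENERATOR_DECLARATION

  // Dispatch mirrors the new VisitCallRuntime switch: each case expands to a
  // direct call of the matching generator; everything else falls through.
  void Visit(IntrinsicId id) {
    switch (id) {
#define CALL_INTRINSIC_GENERATOR(Name) \
  case IntrinsicId::k##Name:           \
    return Generate##Name();
      FOR_EACH_DEMO_INTRINSIC(CALL_INTRINSIC_GENERATOR)
#undef CALL_INTRINSIC_GENERATOR
      default:
        std::cout << "fall back to a runtime call\n";
    }
  }
};

int main() {
  DemoBuilder b;
  b.Visit(IntrinsicId::kMathFloor);  // prints "inlined MathFloor"
  b.Visit(IntrinsicId::kUnknown);    // prints the fallback path
}
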
@@ -10112,7 +10082,7 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
DCHECK(!HasStackOverflow());
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
Expression* target = expr->expression();
VariableProxy* proxy = target->AsVariableProxy();
Property* prop = target->AsProperty();
@@ -10321,9 +10291,9 @@ HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
HConstant* constant = HConstant::cast(value);
Maybe<HConstant*> number =
constant->CopyToTruncatedNumber(isolate(), zone());
- if (number.has_value) {
+ if (number.IsJust()) {
*expected = Type::Number(zone());
- return AddInstruction(number.value);
+ return AddInstruction(number.FromJust());
}
}
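
This hunk, like the i18n.cc changes further down, migrates from the old Maybe fields (has_value/value) to the accessor style (IsJust()/FromJust()). A tiny stand-in Maybe, written from scratch to illustrate why the accessor form is preferable: the getter can enforce the has-value invariant that direct field access silently skipped.

#include <cassert>
#include <iostream>

// Minimal stand-in for the Maybe<T> shape this patch migrates to; not the
// real V8 type, which has more machinery.
template <typename T>
class Maybe {
 public:
  static Maybe Just(T value) { return Maybe(true, value); }
  static Maybe Nothing() { return Maybe(false, T()); }
  bool IsJust() const { return has_value_; }
  T FromJust() const {
    assert(has_value_);  // mirrors the CHECKs at the patched call sites
    return value_;
  }

 private:
  Maybe(bool has_value, T value) : has_value_(has_value), value_(value) {}
  bool has_value_;
  T value_;
};

Maybe<int> ParseDigit(char c) {
  if (c >= '0' && c <= '9') return Maybe<int>::Just(c - '0');
  return Maybe<int>::Nothing();
}

int main() {
  // Old style read .has_value / .value directly; new style goes through
  // IsJust()/FromJust(), which can assert before handing out the value.
  Maybe<int> digit = ParseDigit('7');
  if (digit.IsJust()) std::cout << digit.FromJust() << "\n";  // prints 7
}
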
@@ -10468,16 +10438,21 @@ HValue* HGraphBuilder::BuildBinaryOperation(
return AddUncasted<HInvokeFunction>(function, 2);
}
- // Fast path for empty constant strings.
- if (left->IsConstant() &&
- HConstant::cast(left)->HasStringValue() &&
- HConstant::cast(left)->StringValue()->length() == 0) {
- return right;
- }
- if (right->IsConstant() &&
- HConstant::cast(right)->HasStringValue() &&
- HConstant::cast(right)->StringValue()->length() == 0) {
- return left;
+ // Fast paths for empty constant strings.
+ Handle<String> left_string =
+ left->IsConstant() && HConstant::cast(left)->HasStringValue()
+ ? HConstant::cast(left)->StringValue()
+ : Handle<String>();
+ Handle<String> right_string =
+ right->IsConstant() && HConstant::cast(right)->HasStringValue()
+ ? HConstant::cast(right)->StringValue()
+ : Handle<String>();
+ if (!left_string.is_null() && left_string->length() == 0) return right;
+ if (!right_string.is_null() && right_string->length() == 0) return left;
+ if (!left_string.is_null() && !right_string.is_null()) {
+ return AddUncasted<HStringAdd>(
+ left, right, allocation_mode.GetPretenureMode(),
+ STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
}
// Register the dependent code with the allocation site.
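
The new fast-path block folds string addition at graph-build time whenever the operands are constant strings: an empty constant operand disappears entirely, and two constants still produce a single HStringAdd rather than the generic path. The decision table, restated as plain C++ with illustrative names (a null pointer here plays the role of "not a constant string"):

#include <string>

enum class StringAddFold { kReturnLeft, kReturnRight, kConstantAdd, kGeneric };

StringAddFold ClassifyStringAdd(const std::string* left_string,
                                const std::string* right_string) {
  if (left_string && left_string->empty()) return StringAddFold::kReturnRight;
  if (right_string && right_string->empty()) return StringAddFold::kReturnLeft;
  // Two constant strings still become one HStringAdd instruction.
  if (left_string && right_string) return StringAddFold::kConstantAdd;
  return StringAddFold::kGeneric;
}
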
@@ -10539,10 +10514,10 @@ HValue* HGraphBuilder::BuildBinaryOperation(
instr = AddUncasted<HMul>(left, right);
break;
case Token::MOD: {
- if (fixed_right_arg.has_value &&
- !right->EqualsInteger32Constant(fixed_right_arg.value)) {
- HConstant* fixed_right = Add<HConstant>(
- static_cast<int>(fixed_right_arg.value));
+ if (fixed_right_arg.IsJust() &&
+ !right->EqualsInteger32Constant(fixed_right_arg.FromJust())) {
+ HConstant* fixed_right =
+ Add<HConstant>(static_cast<int>(fixed_right_arg.FromJust()));
IfBuilder if_same(this);
if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
if_same.Then();
@@ -10763,7 +10738,7 @@ void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
BuildBinaryOperation(expr, left, right,
ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
: PUSH_BEFORE_SIMULATE);
- if (FLAG_hydrogen_track_positions && result->IsBinaryOperation()) {
+ if (top_info()->is_tracking_positions() && result->IsBinaryOperation()) {
HBinaryOperation::cast(result)->SetOperandPositions(
zone(),
ScriptPositionToSourcePosition(expr->left()->position()),
@@ -10801,7 +10776,7 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
// Check for a few fast cases. The AST visiting behavior must be in sync
// with the full codegen: We don't push both left and right values onto
@@ -10836,8 +10811,6 @@ void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
CHECK_ALIVE(VisitForValue(expr->left()));
CHECK_ALIVE(VisitForValue(expr->right()));
- if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
-
HValue* right = Pop();
HValue* left = Pop();
Token::Value op = expr->op();
@@ -10945,7 +10918,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
AddCheckMap(operand_to_check, map);
HCompareObjectEqAndBranch* result =
New<HCompareObjectEqAndBranch>(left, right);
- if (FLAG_hydrogen_track_positions) {
+ if (top_info()->is_tracking_positions()) {
result->set_operand_position(zone(), 0, left_position);
result->set_operand_position(zone(), 1, right_position);
}
@@ -11011,7 +10984,7 @@ HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
HCompareNumericAndBranch* result =
New<HCompareNumericAndBranch>(left, right, op);
result->set_observed_input_representation(left_rep, right_rep);
- if (FLAG_hydrogen_track_positions) {
+ if (top_info()->is_tracking_positions()) {
result->SetOperandPositions(zone(), left_position, right_position);
}
return result;
@@ -11027,7 +11000,7 @@ void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
DCHECK(current_block() != NULL);
DCHECK(current_block()->HasPredecessor());
DCHECK(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
- if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
+ if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
CHECK_ALIVE(VisitForValue(sub_expr));
HValue* value = Pop();
if (expr->op() == Token::EQ_STRICT) {
@@ -11625,11 +11598,6 @@ void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
- return Bailout(kInlinedRuntimeFunctionIsNonNegativeSmi);
-}
-
-
void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -11639,12 +11607,6 @@ void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf(
- CallRuntime* call) {
- return Bailout(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf);
-}
-
-
// Support for construct call checks.
void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
DCHECK(call->arguments()->length() == 0);
@@ -11703,14 +11665,6 @@ void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
}
-// Support for accessing the class and value fields of an object.
-void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) {
- // The special form detected by IsClassOfTest is detected before we get here
- // and does not cause a bailout.
- return Bailout(kInlinedRuntimeFunctionClassOf);
-}
-
-
void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -11739,6 +11693,17 @@ void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateJSValueGetValue(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = Add<HLoadNamedField>(
+ value, nullptr,
+ HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset));
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
DCHECK(call->arguments()->length() == 2);
DCHECK_NOT_NULL(call->arguments()->at(1)->AsLiteral());
@@ -11900,6 +11865,15 @@ void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateStringGetLength(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* string = Pop();
+ HInstruction* result = AddLoadStringLength(string);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
// Support for direct calls from JavaScript to native RegExp code.
void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
DCHECK_EQ(4, call->arguments()->length());
@@ -12015,12 +11989,6 @@ void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateDefaultConstructorCallSuper(
- CallRuntime* call) {
- return Bailout(kSuperReference);
-}
-
-
// Fast call to math functions.
void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
DCHECK_EQ(2, call->arguments()->length());
@@ -12033,6 +12001,24 @@ void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
}
+void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathClz32);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
+ DCHECK(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
+ return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12042,7 +12028,7 @@ void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
}
-void HOptimizedGraphBuilder::GenerateMathSqrtRT(CallRuntime* call) {
+void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
DCHECK(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
@@ -12705,21 +12691,15 @@ void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateFastOneByteArrayJoin(CallRuntime* call) {
+ // Simply returning undefined here would be semantically correct and even
+ // avoid the bailout. Nevertheless, some ancient benchmarks like SunSpider's
+ // string-fasta would tank, because fullcode contains an optimized version.
+ // Obviously the fullcode => Crankshaft => bailout => fullcode dance is
+ // faster... *sigh*
return Bailout(kInlinedRuntimeFunctionFastOneByteArrayJoin);
}
-// Support for generators.
-void HOptimizedGraphBuilder::GenerateGeneratorNext(CallRuntime* call) {
- return Bailout(kInlinedRuntimeFunctionGeneratorNext);
-}
-
-
-void HOptimizedGraphBuilder::GenerateGeneratorThrow(CallRuntime* call) {
- return Bailout(kInlinedRuntimeFunctionGeneratorThrow);
-}
-
-
void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
CallRuntime* call) {
Add<HDebugBreak>();
@@ -13210,9 +13190,8 @@ void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
PrintIndent();
std::ostringstream os;
os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction;
- if (FLAG_hydrogen_track_positions &&
- instruction->has_position() &&
- instruction->position().raw() != 0) {
+ if (graph->info()->is_tracking_positions() &&
+ instruction->has_position() && instruction->position().raw() != 0) {
const SourcePosition pos = instruction->position();
os << " pos:";
if (pos.inlining_id() != 0) os << pos.inlining_id() << "_";
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 8411b6ddf9..5fded82959 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -407,13 +407,11 @@ class HGraph FINAL : public ZoneObject {
use_optimistic_licm_ = value;
}
- void MarkRecursive() {
- is_recursive_ = true;
- }
+ void MarkRecursive() { is_recursive_ = true; }
+ bool is_recursive() const { return is_recursive_; }
- bool is_recursive() const {
- return is_recursive_;
- }
+ void MarkThisHasUses() { this_has_uses_ = true; }
+ bool this_has_uses() const { return this_has_uses_; }
void MarkDependsOnEmptyArrayProtoElements() {
// Add map dependency if not already added.
@@ -499,6 +497,7 @@ class HGraph FINAL : public ZoneObject {
Zone* zone_;
bool is_recursive_;
+ bool this_has_uses_;
bool use_optimistic_licm_;
bool depends_on_empty_array_proto_elements_;
int type_change_checksum_;
@@ -1869,12 +1868,14 @@ class HGraphBuilder {
protected:
void SetSourcePosition(int position) {
- DCHECK(position != RelocInfo::kNoPosition);
- position_.set_position(position - start_position_);
+ if (position != RelocInfo::kNoPosition) {
+ position_.set_position(position - start_position_);
+ }
+ // Otherwise position remains unknown.
}
void EnterInlinedSource(int start_position, int id) {
- if (FLAG_hydrogen_track_positions) {
+ if (top_info()->is_tracking_positions()) {
start_position_ = start_position;
position_.set_inlining_id(id);
}
@@ -2118,15 +2119,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
protected:
- // Type of a member function that generates inline code for a native function.
- typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator)
- (CallRuntime* call);
-
// Forward declarations for inner scope classes.
class SubgraphScope;
- static const InlineFunctionGenerator kInlineFunctionGenerators[];
-
static const int kMaxCallPolymorphism = 4;
static const int kMaxLoadPolymorphism = 4;
static const int kMaxStorePolymorphism = 4;
@@ -2168,13 +2163,85 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
return function_state()->compilation_info()->language_mode();
}
- // Generators for inline runtime functions.
-#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
- void Generate##Name(CallRuntime* call);
-
- INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
- INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
-#undef INLINE_FUNCTION_GENERATOR_DECLARATION
+#define FOR_EACH_HYDROGEN_INTRINSIC(F) \
+ F(IsSmi) \
+ F(IsArray) \
+ F(IsRegExp) \
+ F(IsJSProxy) \
+ F(IsConstructCall) \
+ F(CallFunction) \
+ F(ArgumentsLength) \
+ F(Arguments) \
+ F(ValueOf) \
+ F(SetValueOf) \
+ F(DateField) \
+ F(StringCharFromCode) \
+ F(StringCharAt) \
+ F(OneByteSeqStringSetChar) \
+ F(TwoByteSeqStringSetChar) \
+ F(ObjectEquals) \
+ F(IsObject) \
+ F(IsFunction) \
+ F(IsUndetectableObject) \
+ F(IsSpecObject) \
+ F(MathPow) \
+ F(IsMinusZero) \
+ F(HasCachedArrayIndex) \
+ F(GetCachedArrayIndex) \
+ F(FastOneByteArrayJoin) \
+ F(DebugBreakInOptimizedCode) \
+ F(StringCharCodeAt) \
+ F(StringAdd) \
+ F(SubString) \
+ F(StringCompare) \
+ F(RegExpExec) \
+ F(RegExpConstructResult) \
+ F(GetFromCache) \
+ F(NumberToString) \
+ F(DebugIsActive) \
+ /* Typed Arrays */ \
+ F(TypedArrayInitialize) \
+ F(DataViewInitialize) \
+ F(MaxSmi) \
+ F(TypedArrayMaxSizeInHeap) \
+ F(ArrayBufferViewGetByteLength) \
+ F(ArrayBufferViewGetByteOffset) \
+ F(TypedArrayGetLength) \
+ /* ArrayBuffer */ \
+ F(ArrayBufferGetByteLength) \
+ /* Maths */ \
+ F(ConstructDouble) \
+ F(DoubleHi) \
+ F(DoubleLo) \
+ F(MathClz32) \
+ F(MathFloor) \
+ F(MathSqrt) \
+ F(MathLogRT) \
+ /* ES6 Collections */ \
+ F(MapClear) \
+ F(MapDelete) \
+ F(MapGet) \
+ F(MapGetSize) \
+ F(MapHas) \
+ F(MapInitialize) \
+ F(MapSet) \
+ F(SetAdd) \
+ F(SetClear) \
+ F(SetDelete) \
+ F(SetGetSize) \
+ F(SetHas) \
+ F(SetInitialize) \
+ /* Arrays */ \
+ F(HasFastPackedElements) \
+ F(GetPrototype) \
+ /* Strings */ \
+ F(StringGetLength) \
+ /* JSValue */ \
+ F(JSValueGetValue)
+
+#define GENERATOR_DECLARATION(Name) void Generate##Name(CallRuntime* call);
+ FOR_EACH_HYDROGEN_INTRINSIC(GENERATOR_DECLARATION)
+#undef GENERATOR_DECLARATION
void VisitDelete(UnaryOperation* expr);
void VisitVoid(UnaryOperation* expr);
@@ -2319,8 +2386,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
int InliningAstSize(Handle<JSFunction> target);
bool TryInline(Handle<JSFunction> target, int arguments_count,
HValue* implicit_return_value, BailoutId ast_id,
- BailoutId return_id, InliningKind inlining_kind,
- SourcePosition position);
+ BailoutId return_id, InliningKind inlining_kind);
bool TryInlineCall(Call* expr);
bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
@@ -2430,121 +2496,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
void BuildInlinedCallArray(Expression* expression, int argument_count,
Handle<AllocationSite> site);
- class LookupResult FINAL BASE_EMBEDDED {
- public:
- LookupResult()
- : lookup_type_(NOT_FOUND),
- details_(NONE, DATA, Representation::None()) {}
-
- void LookupDescriptor(Map* map, Name* name) {
- DescriptorArray* descriptors = map->instance_descriptors();
- int number = descriptors->SearchWithCache(name, map);
- if (number == DescriptorArray::kNotFound) return NotFound();
- lookup_type_ = DESCRIPTOR_TYPE;
- details_ = descriptors->GetDetails(number);
- number_ = number;
- }
-
- void LookupTransition(Map* map, Name* name, PropertyAttributes attributes) {
- int transition_index = map->SearchTransition(kData, name, attributes);
- if (transition_index == TransitionArray::kNotFound) return NotFound();
- lookup_type_ = TRANSITION_TYPE;
- transition_ = handle(map->GetTransition(transition_index));
- number_ = transition_->LastAdded();
- details_ = transition_->instance_descriptors()->GetDetails(number_);
- }
-
- void NotFound() {
- lookup_type_ = NOT_FOUND;
- details_ = PropertyDetails(NONE, DATA, 0);
- }
-
- Representation representation() const {
- DCHECK(IsFound());
- return details_.representation();
- }
-
- // Property callbacks does not include transitions to callbacks.
- bool IsAccessorConstant() const {
- return !IsTransition() && details_.type() == ACCESSOR_CONSTANT;
- }
-
- bool IsReadOnly() const {
- DCHECK(IsFound());
- return details_.IsReadOnly();
- }
-
- bool IsData() const {
- return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == DATA;
- }
-
- bool IsDataConstant() const {
- return lookup_type_ == DESCRIPTOR_TYPE &&
- details_.type() == DATA_CONSTANT;
- }
-
- bool IsConfigurable() const { return details_.IsConfigurable(); }
- bool IsFound() const { return lookup_type_ != NOT_FOUND; }
- bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
-
- // Is the result is a property excluding transitions and the null
- // descriptor?
- bool IsProperty() const { return IsFound() && !IsTransition(); }
-
- Handle<Map> GetTransitionTarget() const {
- DCHECK(IsTransition());
- return transition_;
- }
-
- bool IsTransitionToData() const {
- return IsTransition() && details_.type() == DATA;
- }
-
- int GetLocalFieldIndexFromMap(Map* map) const {
- return GetFieldIndexFromMap(map) - map->inobject_properties();
- }
-
- Object* GetConstantFromMap(Map* map) const {
- DCHECK(details_.type() == DATA_CONSTANT);
- return GetValueFromMap(map);
- }
-
- Object* GetValueFromMap(Map* map) const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return map->instance_descriptors()->GetValue(number_);
- }
-
- int GetFieldIndexFromMap(Map* map) const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return map->instance_descriptors()->GetFieldIndex(number_);
- }
-
- HeapType* GetFieldTypeFromMap(Map* map) const {
- DCHECK_NE(NOT_FOUND, lookup_type_);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return map->instance_descriptors()->GetFieldType(number_);
- }
-
- Map* GetFieldOwnerFromMap(Map* map) const {
- DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
- lookup_type_ == TRANSITION_TYPE);
- DCHECK(number_ < map->NumberOfOwnDescriptors());
- return map->FindFieldOwner(number_);
- }
-
- private:
- // Where did we find the result;
- enum { NOT_FOUND, DESCRIPTOR_TYPE, TRANSITION_TYPE } lookup_type_;
-
- Handle<Map> transition_;
- int number_;
- PropertyDetails details_;
- };
-
class PropertyAccessInfo {
public:
PropertyAccessInfo(HOptimizedGraphBuilder* builder,
@@ -2555,7 +2506,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
map_(map),
name_(name),
field_type_(HType::Tagged()),
- access_(HObjectAccess::ForMap()) {}
+ access_(HObjectAccess::ForMap()),
+ lookup_type_(NOT_FOUND),
+ details_(NONE, DATA, Representation::None()) {}
// Checks whether this PropertyAccessInfo can be handled as a monomorphic
// named load. It additionally fills in the fields necessary to generate the
@@ -2604,20 +2557,26 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
Handle<JSObject> holder() { return holder_; }
Handle<JSFunction> accessor() { return accessor_; }
Handle<Object> constant() { return constant_; }
- Handle<Map> transition() { return lookup_.GetTransitionTarget(); }
+ Handle<Map> transition() { return transition_; }
SmallMapList* field_maps() { return &field_maps_; }
HType field_type() const { return field_type_; }
HObjectAccess access() { return access_; }
- bool IsFound() const { return lookup_.IsFound(); }
- bool IsProperty() const { return lookup_.IsProperty(); }
- bool IsData() const { return lookup_.IsData(); }
- bool IsDataConstant() const { return lookup_.IsDataConstant(); }
- bool IsAccessorConstant() const { return lookup_.IsAccessorConstant(); }
- bool IsTransition() const { return lookup_.IsTransition(); }
-
- bool IsConfigurable() const { return lookup_.IsConfigurable(); }
- bool IsReadOnly() const { return lookup_.IsReadOnly(); }
+ bool IsFound() const { return lookup_type_ != NOT_FOUND; }
+ bool IsProperty() const { return IsFound() && !IsTransition(); }
+ bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+ bool IsData() const {
+ return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == DATA;
+ }
+ bool IsDataConstant() const {
+ return lookup_type_ == DESCRIPTOR_TYPE &&
+ details_.type() == DATA_CONSTANT;
+ }
+ bool IsAccessorConstant() const {
+ return !IsTransition() && details_.type() == ACCESSOR_CONSTANT;
+ }
+ bool IsConfigurable() const { return details_.IsConfigurable(); }
+ bool IsReadOnly() const { return details_.IsReadOnly(); }
bool IsStringType() { return map_->instance_type() < FIRST_NONSTRING_TYPE; }
bool IsNumberType() { return map_->instance_type() == HEAP_NUMBER_TYPE; }
@@ -2625,31 +2584,71 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
bool IsArrayType() { return map_->instance_type() == JS_ARRAY_TYPE; }
private:
- Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
- return handle(lookup_.GetValueFromMap(*map), isolate());
- }
Handle<Object> GetConstantFromMap(Handle<Map> map) const {
- return handle(lookup_.GetConstantFromMap(*map), isolate());
+ DCHECK_EQ(DESCRIPTOR_TYPE, lookup_type_);
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ return handle(map->instance_descriptors()->GetValue(number_), isolate());
+ }
+ Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
+ return GetConstantFromMap(map);
}
Handle<HeapType> GetFieldTypeFromMap(Handle<Map> map) const {
- return handle(lookup_.GetFieldTypeFromMap(*map), isolate());
+ DCHECK(IsFound());
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ return handle(map->instance_descriptors()->GetFieldType(number_),
+ isolate());
}
Handle<Map> GetFieldOwnerFromMap(Handle<Map> map) const {
- return handle(lookup_.GetFieldOwnerFromMap(*map));
+ DCHECK(IsFound());
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ return handle(map->FindFieldOwner(number_));
}
int GetLocalFieldIndexFromMap(Handle<Map> map) const {
- return lookup_.GetLocalFieldIndexFromMap(*map);
+ DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
+ lookup_type_ == TRANSITION_TYPE);
+ DCHECK(number_ < map->NumberOfOwnDescriptors());
+ int field_index = map->instance_descriptors()->GetFieldIndex(number_);
+ return field_index - map->inobject_properties();
+ }
+
+ void LookupDescriptor(Map* map, Name* name) {
+ DescriptorArray* descriptors = map->instance_descriptors();
+ int number = descriptors->SearchWithCache(name, map);
+ if (number == DescriptorArray::kNotFound) return NotFound();
+ lookup_type_ = DESCRIPTOR_TYPE;
+ details_ = descriptors->GetDetails(number);
+ number_ = number;
+ }
+ void LookupTransition(Map* map, Name* name, PropertyAttributes attributes) {
+ Map* target =
+ TransitionArray::SearchTransition(map, kData, name, attributes);
+ if (target == NULL) return NotFound();
+ lookup_type_ = TRANSITION_TYPE;
+ transition_ = handle(target);
+ number_ = transition_->LastAdded();
+ details_ = transition_->instance_descriptors()->GetDetails(number_);
+ }
+ void NotFound() {
+ lookup_type_ = NOT_FOUND;
+ details_ = PropertyDetails::Empty();
+ }
+ Representation representation() const {
+ DCHECK(IsFound());
+ return details_.representation();
+ }
+ bool IsTransitionToData() const {
+ return IsTransition() && details_.type() == DATA;
}
- Representation representation() const { return lookup_.representation(); }
Zone* zone() { return builder_->zone(); }
CompilationInfo* top_info() { return builder_->top_info(); }
CompilationInfo* current_info() { return builder_->current_info(); }
bool LoadResult(Handle<Map> map);
- void LoadFieldMaps(Handle<Map> map);
+ bool LoadFieldMaps(Handle<Map> map);
bool LookupDescriptor();
bool LookupInPrototypes();
+ bool IsIntegerIndexedExotic();
bool IsCompatible(PropertyAccessInfo* other);
void GeneralizeRepresentation(Representation r) {
@@ -2657,7 +2656,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
access_.representation().generalize(r));
}
- LookupResult lookup_;
HOptimizedGraphBuilder* builder_;
PropertyAccessType access_type_;
Handle<Map> map_;
@@ -2669,6 +2667,11 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
SmallMapList field_maps_;
HType field_type_;
HObjectAccess access_;
+
+ enum { NOT_FOUND, DESCRIPTOR_TYPE, TRANSITION_TYPE } lookup_type_;
+ Handle<Map> transition_;
+ int number_;
+ PropertyDetails details_;
};
HInstruction* BuildMonomorphicAccess(PropertyAccessInfo* info,
@@ -2756,12 +2759,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
PropertyAccessType access_type,
bool* has_side_effects);
- HInstruction* BuildNamedGeneric(PropertyAccessType access,
- Expression* expr,
- HValue* object,
- Handle<String> name,
- HValue* value,
- bool is_uninitialized = false);
+ HInstruction* BuildNamedGeneric(PropertyAccessType access, Expression* expr,
+ HValue* object, Handle<String> name,
+ HValue* value, bool is_uninitialized = false);
HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/i18n.cc
index 69fa9ca895..9dfdd63f5b 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/i18n.cc
@@ -395,8 +395,8 @@ void SetResolvedNumberSettings(Isolate* isolate,
Handle<String> key =
factory->NewStringFromStaticChars("minimumSignificantDigits");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(resolved, key);
- CHECK(maybe.has_value);
- if (maybe.value) {
+ CHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("minimumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()),
@@ -405,8 +405,8 @@ void SetResolvedNumberSettings(Isolate* isolate,
key = factory->NewStringFromStaticChars("maximumSignificantDigits");
maybe = JSReceiver::HasOwnProperty(resolved, key);
- CHECK(maybe.has_value);
- if (maybe.value) {
+ CHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("maximumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()),
@@ -725,8 +725,8 @@ icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
Handle<String> key =
isolate->factory()->NewStringFromStaticChars("dateFormat");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
- CHECK(maybe.has_value);
- if (maybe.value) {
+ CHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
return reinterpret_cast<icu::SimpleDateFormat*>(
obj->GetInternalField(0));
}
@@ -805,8 +805,8 @@ icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
Handle<String> key =
isolate->factory()->NewStringFromStaticChars("numberFormat");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
- CHECK(maybe.has_value);
- if (maybe.value) {
+ CHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
}
@@ -866,8 +866,8 @@ icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
Handle<String> key = isolate->factory()->NewStringFromStaticChars("collator");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
- CHECK(maybe.has_value);
- if (maybe.value) {
+ CHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
}
@@ -931,8 +931,8 @@ icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
Handle<String> key =
isolate->factory()->NewStringFromStaticChars("breakIterator");
Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
- CHECK(maybe.has_value);
- if (maybe.value) {
+ CHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
}
diff --git a/deps/v8/src/i18n.js b/deps/v8/src/i18n.js
index 61e0ac98e5..c743cad95a 100644
--- a/deps/v8/src/i18n.js
+++ b/deps/v8/src/i18n.js
@@ -2,20 +2,28 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-"use strict";
-
// ECMAScript 402 API implementation.
/**
* Intl object is a single object that has some named properties,
* all of which are constructors.
*/
-$Object.defineProperty(global, "Intl", { enumerable: false, value: (function() {
+(function() {
-var Intl = {};
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalDate = global.Date;
+var GlobalRegExp = global.RegExp;
+var GlobalString = global.String;
var undefined = global.undefined;
+var Intl = {};
+
+%AddNamedProperty(global, "Intl", Intl, DONT_ENUM);
+
var AVAILABLE_SERVICES = ['collator',
'numberformat',
'dateformat',
@@ -48,7 +56,7 @@ var UNICODE_EXTENSION_RE = undefined;
function GetUnicodeExtensionRE() {
if (UNICODE_EXTENSION_RE === undefined) {
- UNICODE_EXTENSION_RE = new $RegExp('-u(-[a-z0-9]{2,8})+', 'g');
+ UNICODE_EXTENSION_RE = new GlobalRegExp('-u(-[a-z0-9]{2,8})+', 'g');
}
return UNICODE_EXTENSION_RE;
}
@@ -60,7 +68,7 @@ var ANY_EXTENSION_RE = undefined;
function GetAnyExtensionRE() {
if (ANY_EXTENSION_RE === undefined) {
- ANY_EXTENSION_RE = new $RegExp('-[a-z0-9]{1}-.*', 'g');
+ ANY_EXTENSION_RE = new GlobalRegExp('-[a-z0-9]{1}-.*', 'g');
}
return ANY_EXTENSION_RE;
}
@@ -72,7 +80,7 @@ var QUOTED_STRING_RE = undefined;
function GetQuotedStringRE() {
if (QUOTED_STRING_RE === undefined) {
- QUOTED_STRING_RE = new $RegExp("'[^']+'", 'g');
+ QUOTED_STRING_RE = new GlobalRegExp("'[^']+'", 'g');
}
return QUOTED_STRING_RE;
}
@@ -85,7 +93,7 @@ var SERVICE_RE = undefined;
function GetServiceRE() {
if (SERVICE_RE === undefined) {
SERVICE_RE =
- new $RegExp('^(collator|numberformat|dateformat|breakiterator)$');
+ new GlobalRegExp('^(collator|numberformat|dateformat|breakiterator)$');
}
return SERVICE_RE;
}
@@ -135,7 +143,7 @@ var TIMEZONE_NAME_CHECK_RE = undefined;
function GetTimezoneNameCheckRE() {
if (TIMEZONE_NAME_CHECK_RE === undefined) {
TIMEZONE_NAME_CHECK_RE =
- new $RegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
+ new GlobalRegExp('^([A-Za-z]+)/([A-Za-z]+)(?:_([A-Za-z]+))*$');
}
return TIMEZONE_NAME_CHECK_RE;
}
@@ -283,7 +291,7 @@ function supportedLocalesOf(service, locales, options) {
var matcher = options.localeMatcher;
if (matcher !== undefined) {
- matcher = $String(matcher);
+ matcher = GlobalString(matcher);
if (matcher !== 'lookup' && matcher !== 'best fit') {
throw new $RangeError('Illegal value for localeMatcher:' + matcher);
}
@@ -369,7 +377,7 @@ function getGetOption(options, caller) {
value = $Boolean(value);
break;
case 'string':
- value = $String(value);
+ value = GlobalString(value);
break;
case 'number':
value = $Number(value);
@@ -525,7 +533,7 @@ function setOptions(inOptions, extensionMap, keyValues, getOption, outOptions) {
var extension = '';
var updateExtension = function updateExtension(key, value) {
- return '-' + key + '-' + $String(value);
+ return '-' + key + '-' + GlobalString(value);
}
var updateProperty = function updateProperty(property, type, value) {
@@ -614,7 +622,7 @@ function getOptimalLanguageTag(original, resolved) {
}
// Preserve extensions of resolved locale, but swap base tags with original.
- var resolvedBase = new $RegExp('^' + locales[1].base);
+ var resolvedBase = new GlobalRegExp('^' + locales[1].base);
return resolved.replace(resolvedBase, locales[0].base);
}
@@ -704,7 +712,7 @@ function canonicalizeLanguageTag(localeID) {
throw new $TypeError('Language ID should be string or object.');
}
- var localeString = $String(localeID);
+ var localeString = GlobalString(localeID);
if (isValidLanguageTag(localeString) === false) {
throw new $RangeError('Invalid language tag: ' + localeString);
@@ -833,12 +841,12 @@ function BuildLanguageTagREs() {
var privateUse = '(x(-' + alphanum + '{1,8})+)';
var singleton = '(' + digit + '|[A-WY-Za-wy-z])';
- LANGUAGE_SINGLETON_RE = new $RegExp('^' + singleton + '$', 'i');
+ LANGUAGE_SINGLETON_RE = new GlobalRegExp('^' + singleton + '$', 'i');
var extension = '(' + singleton + '(-' + alphanum + '{2,8})+)';
var variant = '(' + alphanum + '{5,8}|(' + digit + alphanum + '{3}))';
- LANGUAGE_VARIANT_RE = new $RegExp('^' + variant + '$', 'i');
+ LANGUAGE_VARIANT_RE = new GlobalRegExp('^' + variant + '$', 'i');
var region = '(' + alpha + '{2}|' + digit + '{3})';
var script = '(' + alpha + '{4})';
@@ -850,7 +858,7 @@ function BuildLanguageTagREs() {
var languageTag =
'^(' + langTag + '|' + privateUse + '|' + grandfathered + ')$';
- LANGUAGE_TAG_RE = new $RegExp(languageTag, 'i');
+ LANGUAGE_TAG_RE = new GlobalRegExp(languageTag, 'i');
}
/**
@@ -1023,7 +1031,7 @@ function initializeCollator(collator, locales, options) {
*/
function compare(collator, x, y) {
return %InternalCompare(%GetImplFromInitializedIntlObject(collator),
- $String(x), $String(y));
+ GlobalString(x), GlobalString(y));
};
@@ -1276,7 +1284,7 @@ function formatNumber(formatter, value) {
*/
function parseNumber(formatter, value) {
return %InternalNumberParse(%GetImplFromInitializedIntlObject(formatter),
- $String(value));
+ GlobalString(value));
}
@@ -1658,7 +1666,7 @@ function initializeDateTimeFormat(dateFormat, locales, options) {
function formatDate(formatter, dateValue) {
var dateMs;
if (dateValue === undefined) {
- dateMs = $Date.now();
+ dateMs = GlobalDate.now();
} else {
dateMs = $Number(dateValue);
}
@@ -1668,7 +1676,7 @@ function formatDate(formatter, dateValue) {
}
return %InternalDateFormat(%GetImplFromInitializedIntlObject(formatter),
- new $Date(dateMs));
+ new GlobalDate(dateMs));
}
@@ -1680,7 +1688,7 @@ function formatDate(formatter, dateValue) {
*/
function parseDate(formatter, value) {
return %InternalDateParse(%GetImplFromInitializedIntlObject(formatter),
- $String(value));
+ GlobalString(value));
}
@@ -1841,7 +1849,7 @@ function initializeBreakIterator(iterator, locales, options) {
*/
function adoptText(iterator, text) {
%BreakIteratorAdoptText(%GetImplFromInitializedIntlObject(iterator),
- $String(text));
+ GlobalString(text));
}
@@ -1924,8 +1932,7 @@ function cachedOrNewService(service, locales, options, defaults) {
* Compares this and that, and returns less than 0, 0 or greater than 0 value.
* Overrides the built-in method.
*/
-ObjectDefineProperty($String.prototype, 'localeCompare', {
- value: function(that) {
+OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
@@ -1938,14 +1945,8 @@ ObjectDefineProperty($String.prototype, 'localeCompare', {
var options = %_Arguments(2);
var collator = cachedOrNewService('collator', locales, options);
return compare(collator, this, that);
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName($String.prototype.localeCompare, 'localeCompare');
-%FunctionRemovePrototype($String.prototype.localeCompare);
-%SetNativeFlag($String.prototype.localeCompare);
+ }
+);
/**
@@ -1955,15 +1956,14 @@ ObjectDefineProperty($String.prototype, 'localeCompare', {
* If the form is not one of "NFC", "NFD", "NFKC", or "NFKD", then throw
* a RangeError Exception.
*/
-ObjectDefineProperty($String.prototype, 'normalize', {
- value: function(that) {
+OverrideFunction(GlobalString.prototype, 'normalize', function(that) {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
CHECK_OBJECT_COERCIBLE(this, "String.prototype.normalize");
- var form = $String(%_Arguments(0) || 'NFC');
+ var form = GlobalString(%_Arguments(0) || 'NFC');
var normalizationForm = NORMALIZATION_FORMS.indexOf(form);
if (normalizationForm === -1) {
@@ -1972,22 +1972,15 @@ ObjectDefineProperty($String.prototype, 'normalize', {
}
return %StringNormalize(this, normalizationForm);
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName($String.prototype.normalize, 'normalize');
-%FunctionRemovePrototype($String.prototype.normalize);
-%SetNativeFlag($String.prototype.normalize);
+ }
+);
/**
* Formats a Number object (this) using locale and options values.
* If locale or options are omitted, defaults are used.
*/
-ObjectDefineProperty($Number.prototype, 'toLocaleString', {
- value: function() {
+OverrideFunction($Number.prototype, 'toLocaleString', function() {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
@@ -2000,21 +1993,15 @@ ObjectDefineProperty($Number.prototype, 'toLocaleString', {
var options = %_Arguments(1);
var numberFormat = cachedOrNewService('numberformat', locales, options);
return formatNumber(numberFormat, this);
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName($Number.prototype.toLocaleString, 'toLocaleString');
-%FunctionRemovePrototype($Number.prototype.toLocaleString);
-%SetNativeFlag($Number.prototype.toLocaleString);
+ }
+);
/**
* Returns actual formatted date or fails if date parameter is invalid.
*/
function toLocaleDateTime(date, locales, options, required, defaults, service) {
- if (!(date instanceof $Date)) {
+ if (!(date instanceof GlobalDate)) {
throw new $TypeError('Method invoked on an object that is not Date.');
}
@@ -2036,8 +2023,7 @@ function toLocaleDateTime(date, locales, options, required, defaults, service) {
* If locale or options are omitted, defaults are used - both date and time are
* present in the output.
*/
-ObjectDefineProperty($Date.prototype, 'toLocaleString', {
- value: function() {
+OverrideFunction(GlobalDate.prototype, 'toLocaleString', function() {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
@@ -2046,14 +2032,8 @@ ObjectDefineProperty($Date.prototype, 'toLocaleString', {
var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'any', 'all', 'dateformatall');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName($Date.prototype.toLocaleString, 'toLocaleString');
-%FunctionRemovePrototype($Date.prototype.toLocaleString);
-%SetNativeFlag($Date.prototype.toLocaleString);
+ }
+);
/**
@@ -2061,8 +2041,7 @@ ObjectDefineProperty($Date.prototype, 'toLocaleString', {
* If locale or options are omitted, defaults are used - only date is present
* in the output.
*/
-ObjectDefineProperty($Date.prototype, 'toLocaleDateString', {
- value: function() {
+OverrideFunction(GlobalDate.prototype, 'toLocaleDateString', function() {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
@@ -2071,14 +2050,8 @@ ObjectDefineProperty($Date.prototype, 'toLocaleDateString', {
var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'date', 'date', 'dateformatdate');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName($Date.prototype.toLocaleDateString, 'toLocaleDateString');
-%FunctionRemovePrototype($Date.prototype.toLocaleDateString);
-%SetNativeFlag($Date.prototype.toLocaleDateString);
+ }
+);
/**
@@ -2086,8 +2059,7 @@ ObjectDefineProperty($Date.prototype, 'toLocaleDateString', {
* If locale or options are omitted, defaults are used - only time is present
* in the output.
*/
-ObjectDefineProperty($Date.prototype, 'toLocaleTimeString', {
- value: function() {
+OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
if (%_IsConstructCall()) {
throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
}
@@ -2096,14 +2068,7 @@ ObjectDefineProperty($Date.prototype, 'toLocaleTimeString', {
var options = %_Arguments(1);
return toLocaleDateTime(
this, locales, options, 'time', 'time', 'dateformattime');
- },
- writable: true,
- configurable: true,
- enumerable: false
-});
-%FunctionSetName($Date.prototype.toLocaleTimeString, 'toLocaleTimeString');
-%FunctionRemovePrototype($Date.prototype.toLocaleTimeString);
-%SetNativeFlag($Date.prototype.toLocaleTimeString);
-
-return Intl;
-}())});
+ }
+);
+
+})();
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index c7ec6d9918..0fbd2c5865 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -154,12 +154,24 @@ void RelocInfo::set_target_object(Object* target,
}
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
+Address RelocInfo::target_internal_reference() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return Memory::Address_at(pc_);
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
@@ -268,7 +280,8 @@ Object** RelocInfo::call_object_address() {
void RelocInfo::WipeOut() {
- if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
@@ -300,7 +313,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -327,7 +341,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
@@ -516,6 +531,12 @@ void Assembler::emit_near_disp(Label* L) {
}
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ Memory::Address_at(pc) = target;
+}
+
+
void Operand::set_modrm(int mod, Register rm) {
DCHECK((mod & -4) == 0);
buf_[0] = mod << 6 | rm.code();
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 511b1c8688..2a3384d68d 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -179,17 +179,6 @@ bool RelocInfo::IsInConstantPool() {
}
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count);
-}
-
-
// Patch the code at the current PC with a call to the target address.
// Additional guard int3 instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
@@ -2228,6 +2217,42 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x62);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::punpckhdq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x6A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF2);
+ EMIT(0x0F);
+ EMIT(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index a17b539bac..c5894cceca 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -40,8 +40,8 @@
#include <deque>
#include "src/assembler.h"
+#include "src/compiler.h"
#include "src/isolate.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -279,6 +279,14 @@ inline Condition CommuteCondition(Condition cc) {
}
+enum RoundingMode {
+ kRoundToNearest = 0x0,
+ kRoundDown = 0x1,
+ kRoundUp = 0x2,
+ kRoundToZero = 0x3
+};
+
+
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -538,6 +546,11 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, code, target);
}
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
@@ -1002,13 +1015,6 @@ class Assembler : public AssemblerBase {
void ucomisd(XMMRegister dst, XMMRegister src) { ucomisd(dst, Operand(src)); }
void ucomisd(XMMRegister dst, const Operand& src);
- enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
- };
-
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
@@ -1017,6 +1023,14 @@ class Assembler : public AssemblerBase {
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
+ void punpckldq(XMMRegister dst, XMMRegister src);
+ void punpckhdq(XMMRegister dst, XMMRegister src);
+
+ void maxsd(XMMRegister dst, XMMRegister src) { maxsd(dst, Operand(src)); }
+ void maxsd(XMMRegister dst, const Operand& src);
+ void minsd(XMMRegister dst, XMMRegister src) { minsd(dst, Operand(src)); }
+ void minsd(XMMRegister dst, const Operand& src);
+
void movdqa(XMMRegister dst, const Operand& src);
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
@@ -1241,6 +1255,18 @@ class Assembler : public AssemblerBase {
void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x5e, dst, src1, src2);
}
+ void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vmaxsd(dst, src1, Operand(src2));
+ }
+ void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5f, dst, src1, src2);
+ }
+ void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vminsd(dst, src1, Operand(src2));
+ }
+ void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5d, dst, src1, src2);
+ }
void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
// Prefetch src position into cache level.
@@ -1266,7 +1292,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
+ void RecordDeoptReason(const int reason, const SourcePosition position);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
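
The RoundingMode enum hoisted to namespace scope in this header keeps the hardware encoding: its four values are the two-bit rounding-control field placed in the ROUNDSD immediate (nearest = 00, down = 01, up = 10, toward zero = 11). As a hedged illustration, the same four modes can be reproduced through C's floating-point environment; the mapping helper below is invented for the example:

#include <cfenv>
#include <cmath>
#include <cstdio>

enum RoundingMode {
  kRoundToNearest = 0x0,
  kRoundDown = 0x1,
  kRoundUp = 0x2,
  kRoundToZero = 0x3
};

// Map the assembler's mode to the equivalent fenv constant so the rounding
// behaviour can be demonstrated without generating machine code.
static int ToFenvMode(RoundingMode mode) {
  switch (mode) {
    case kRoundToNearest: return FE_TONEAREST;
    case kRoundDown:      return FE_DOWNWARD;
    case kRoundUp:        return FE_UPWARD;
    case kRoundToZero:    return FE_TOWARDZERO;
  }
  return FE_TONEAREST;
}

int main() {
  std::fesetround(ToFenvMode(kRoundDown));
  std::printf("%.1f\n", std::nearbyint(-1.5));  // prints -2.0 under kRoundDown
  return 0;
}
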
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 9aa4e073f7..ea9b8c9704 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -990,42 +990,116 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+ const int calleeOffset) {
+ // eax : the number of items to be pushed to the stack
+ //
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here, which will cause ecx to become negative.
+ __ mov(ecx, esp);
+ __ sub(ecx, edi);
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, eax);
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, edx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(ebp, calleeOffset)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+
+ __ bind(&okay);
+}
+
+
+static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int argumentsOffset,
+ const int indexOffset,
+ const int limitOffset) {
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ __ mov(key, Operand(ebp, indexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
+
+ if (FLAG_vector_ics) {
+ // TODO(mvstanton): Vector-based ics need additional infrastructure to
+ // be embedded here. For now, just call the runtime.
+ __ push(receiver);
+ __ push(key);
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ } else {
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+ }
+
+ // Push the nth argument.
+ __ push(eax);
+
+ // Update the index on the stack and in register key.
+ __ mov(key, Operand(ebp, indexOffset));
+ __ add(key, Immediate(1 << kSmiTagSize));
+ __ mov(Operand(ebp, indexOffset), key);
+
+ __ bind(&entry);
+ __ cmp(key, Operand(ebp, limitOffset));
+ __ j(not_equal, &loop);
+
+ // On exit, the pushed arguments count is in eax, untagged
+ __ Move(eax, key);
+ __ SmiUntag(eax);
+}
+
+
+// Used by FunctionApply and ReflectApply
+static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
+ const int kFormalParameters = targetIsArgument ? 3 : 2;
+ const int kStackSize = kFormalParameters + 1;
+
+ // Stack at entry:
+ // esp : return address
+ // esp[4] : arguments
+ // esp[8] : receiver ("this")
+ // esp[12] : function
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Stack frame:
+ // ebp : Old base pointer
+ // ebp[4] : return address
+ // ebp[8] : function arguments
+ // ebp[12] : receiver
+ // ebp[16] : function
+ static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ static const int kFunctionOffset = kReceiverOffset + kPointerSize;
__ push(Operand(ebp, kFunctionOffset)); // push this
__ push(Operand(ebp, kArgumentsOffset)); // push arguments
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, eax);
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
+ if (targetIsArgument) {
+ __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ } else {
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ }
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current index and limit.
const int kLimitOffset =
@@ -1088,55 +1162,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&push_receiver);
__ push(ebx);
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- __ mov(key, Operand(ebp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(receiver, Operand(ebp, kArgumentsOffset)); // load arguments
-
- if (FLAG_vector_ics) {
- // TODO(mvstanton): Vector-based ics need additional infrastructure to
- // be embedded here. For now, just call the runtime.
- __ push(receiver);
- __ push(key);
- __ CallRuntime(Runtime::kGetProperty, 2);
- } else {
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
- }
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register key.
- __ mov(key, Operand(ebp, kIndexOffset));
- __ add(key, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, kIndexOffset), key);
-
- __ bind(&entry);
- __ cmp(key, Operand(ebp, kLimitOffset));
- __ j(not_equal, &loop);
+ // Loop over the arguments array, pushing each value to the stack
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
ParameterCount actual(eax);
- __ Move(eax, key);
- __ SmiUntag(eax);
__ mov(edi, Operand(ebp, kFunctionOffset));
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &call_proxy);
__ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
// Call the function proxy.
__ bind(&call_proxy);
@@ -1149,7 +1188,92 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Leave internal frame.
}
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
+}
+
+
+// Used by ReflectConstruct
+static void Generate_ConstructHelper(MacroAssembler* masm) {
+ const int kFormalParameters = 3;
+ const int kStackSize = kFormalParameters + 1;
+
+ // Stack at entry:
+ // esp : return address
+ // esp[4] : original constructor (new.target)
+ // esp[8] : arguments
+ // esp[12] : constructor
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Stack frame:
+ // ebp : Old base pointer
+ // ebp[4] : return address
+ // ebp[8] : original constructor (new.target)
+ // ebp[12] : arguments
+ // ebp[16] : constructor
+ static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
+ static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+
+ // If newTarget is not supplied, set it to constructor
+ Label validate_arguments;
+ __ mov(eax, Operand(ebp, kNewTargetOffset));
+ __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &validate_arguments, Label::kNear);
+ __ mov(eax, Operand(ebp, kFunctionOffset));
+ __ mov(Operand(ebp, kNewTargetOffset), eax);
+
+ // Validate arguments
+ __ bind(&validate_arguments);
+ __ push(Operand(ebp, kFunctionOffset));
+ __ push(Operand(ebp, kArgumentsOffset));
+ __ push(Operand(ebp, kNewTargetOffset));
+ __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ Push(eax); // limit
+ __ push(Immediate(0)); // index
+ // Push newTarget and callee functions
+ __ push(Operand(ebp, kNewTargetOffset));
+ __ push(Operand(ebp, kFunctionOffset));
+
+ // Loop over the arguments array, pushing each value to the stack
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+
+ // Use undefined feedback vector
+ __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+
+ // Call the function.
+ CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ // Leave internal frame.
+ }
+ // remove this, target, arguments, and newTarget
+ __ ret(kStackSize * kPointerSize);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, false);
+}
+
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, true);
+}
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ Generate_ConstructHelper(masm);
}
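
Generate_CheckStackOverflow above turns the smi-tagged argument count in eax directly into a byte count: shifting by kPointerSizeLog2 - kSmiTagSize is shift-left-by-one on ia32, exactly undoing the smi tag while multiplying by the pointer size. A standalone sketch of that arithmetic, assuming ia32 constants and with invented names:

#include <cstdint>
#include <cstdio>

// ia32 assumptions: 4-byte pointers, one smi tag bit.
constexpr int kPointerSizeLog2 = 2;
constexpr int kSmiTagSize = 1;

// smi_argc is the smi-tagged argument count (argc << 1), as found in eax.
// The subtraction can wrap when the stack is already blown, which is why
// the stub uses a signed comparison ("j greater").
static bool ArgumentsFitOnStack(std::uintptr_t esp,
                                std::uintptr_t real_stack_limit,
                                std::uintptr_t smi_argc) {
  std::intptr_t space_left =
      static_cast<std::intptr_t>(esp - real_stack_limit);
  std::intptr_t space_needed = static_cast<std::intptr_t>(
      smi_argc << (kPointerSizeLog2 - kSmiTagSize));
  return space_left > space_needed;
}

int main() {
  // 1000 arguments (smi-tagged: 2000) against 8 KiB of headroom: fits.
  std::printf("%d\n", ArgumentsFitOnStack(0x10002000u, 0x10000000u, 2000));
  return 0;
}
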
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 9b7d9023bd..5436eee951 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -12,6 +12,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -729,7 +730,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ ret(0);
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1078,8 +1079,15 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
+ // If the constructor was [[Call]]ed, the call will not push a new.target
+ // onto the stack. In that case the arguments array we construct is bogus,
+ // but we do not care as the constructor throws immediately.
+ __ cmp(ecx, Immediate(Smi::FromInt(0)));
+ Label skip_decrement;
+ __ j(equal, &skip_decrement);
// Subtract 1 from smi-tagged arguments count.
__ sub(ecx, Immediate(2));
+ __ bind(&skip_decrement);
}
__ lea(edx, Operand(edx, ecx, times_2,
@@ -1189,7 +1197,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1465,22 +1473,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand::StaticVariable(pending_exception));
__ cmp(edx, eax);
__ j(equal, &runtime);
- // For exception, throw the exception again.
-
- // Clear the pending exception variable.
- __ mov(Operand::StaticVariable(pending_exception), edx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(eax, factory->termination_exception());
- Label throw_termination_exception;
- __ j(equal, &throw_termination_exception, Label::kNear);
-
- // Handle normal exception by following handler chain.
- __ Throw(eax);
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(eax);
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ bind(&failure);
// For failure to match, return null.
@@ -1572,7 +1567,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -2516,15 +2511,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ cmp(eax, isolate()->factory()->exception());
__ j(equal, &exception_returned);
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
-
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
__ push(edx);
__ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
Label okay;
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use check here as it attempts to generate call into runtime.
__ j(equal, &okay, Label::kNear);
@@ -2540,24 +2534,48 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
- // Retrieve the pending exception.
- __ mov(eax, Operand::StaticVariable(pending_exception_address));
-
- // Clear the pending exception.
- __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_exception_address), edx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- Label throw_termination_exception;
- __ cmp(eax, isolate()->factory()->termination_exception());
- __ j(equal, &throw_termination_exception);
-
- // Handle normal exception.
- __ Throw(eax);
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set eax to
+ // contain the current pending exception; don't clobber it.
+ ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, eax);
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0)); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(0)); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(find_handler, 3);
+ }
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(eax);
+ // Retrieve the handler context, SP and FP.
+ __ mov(esi, Operand::StaticVariable(pending_handler_context_address));
+ __ mov(esp, Operand::StaticVariable(pending_handler_sp_address));
+ __ mov(ebp, Operand::StaticVariable(pending_handler_fp_address));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (esi == 0) for non-JS frames.
+ Label skip;
+ __ test(esi, esi);
+ __ j(zero, &skip, Label::kNear);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ __ bind(&skip);
+
+ // Compute the handler entry address and jump to it.
+ __ mov(edi, Operand::StaticVariable(pending_handler_code_address));
+ __ mov(edx, Operand::StaticVariable(pending_handler_offset_address));
+ __ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+ __ jmp(edi);
}
@@ -2607,10 +2625,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ mov(eax, Immediate(isolate()->factory()->exception()));
__ jmp(&exit);
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushStackHandler();
// Clear any pending exceptions.
__ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
@@ -2636,7 +2653,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ call(edx);
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
@@ -2924,7 +2941,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
+ MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@@ -2936,6 +2953,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ push(VectorLoadICDescriptor::VectorRegister());
+ __ push(VectorLoadICDescriptor::SlotRegister());
+ }
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
@@ -2951,6 +2972,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ mov(index_, eax);
}
__ pop(object_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ pop(VectorLoadICDescriptor::SlotRegister());
+ __ pop(VectorLoadICDescriptor::VectorRegister());
+ }
// Reload the instance type.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
@@ -3266,7 +3291,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// eax: string
@@ -3489,7 +3514,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@@ -3801,7 +3826,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ bind(&miss);
@@ -4381,15 +4406,234 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawLoadStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawKeyedLoadStub stub(isolate());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register feedback, bool is_polymorphic,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label next, next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ __ push(receiver);
+ __ push(vector);
+
+ Register receiver_map = receiver;
+ Register cached_map = vector;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ mov(receiver_map, FieldOperand(receiver, 0));
+ __ bind(&compare_map);
+ __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+
+ // A named keyed load might have a 2-element array; all other cases can
+ // count on an array with at least 2 {map, handler} pairs, so they can go
+ // right into polymorphic array handling.
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, is_polymorphic ? &start_polymorphic : &next);
+
+ // found, now call handler.
+ Register handler = feedback;
+ __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ pop(vector);
+ __ pop(receiver);
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ if (!is_polymorphic) {
+ __ bind(&next);
+ __ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
+ Immediate(Smi::FromInt(2)));
+ __ j(not_equal, &start_polymorphic);
+ __ pop(vector);
+ __ pop(receiver);
+ __ jmp(miss);
+ }
+
+ // Polymorphic, we have to loop from 2 to N
+ __ bind(&start_polymorphic);
+ __ push(key);
+ Register counter = key;
+ __ mov(counter, Immediate(Smi::FromInt(2)));
+ __ bind(&next_loop);
+ __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &prepare_next);
+ __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ __ bind(&prepare_next);
+ __ add(counter, Immediate(Smi::FromInt(2)));
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(less, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register weak_cell, Label* miss) {
+ // weak_cell initially contains the WeakCell from the feedback vector
+ Label compare_smi_map;
+
+ // Move the weak map into the weak_cell register.
+ Register ic_map = weak_cell;
+ __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ cmp(ic_map, FieldOperand(receiver, 0));
+ __ j(not_equal, miss);
+ Register handler = weak_cell;
+ __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ __ bind(&compare_smi_map);
+ __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, miss);
+ __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+}
+
+
+void VectorRawLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // edx
+ Register name = VectorLoadICDescriptor::NameRegister(); // ecx
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // ebx
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // eax
+ Register scratch = edi;
+ __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicCase(masm, receiver, name, vector, slot, scratch, &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandleArrayCases(masm, receiver, name, vector, slot, scratch, true, &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &miss);
+ __ push(slot);
+ __ push(vector);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::LOAD_IC, code_flags, false, receiver, name, vector, scratch);
+ __ pop(vector);
+ __ pop(slot);
+
+ __ bind(&miss);
+ LoadIC::GenerateMiss(masm);
+}
+
+
+void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // edx
+ Register key = VectorLoadICDescriptor::NameRegister(); // ecx
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // ebx
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // eax
+ Register feedback = edi;
+ __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, feedback);
+ __ j(not_equal, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
+
+ __ bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
}
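
Both load stubs above dispatch on the shape of the feedback slot: a WeakCell caches a single map, a FixedArray holds {map, handler} pairs, the megamorphic sentinel routes to the stub cache (or, for keyed loads, the megamorphic stub), and a Name in a keyed slot means the pair array lives one slot over. A plain sketch of that control flow, using stand-in types rather than real V8 objects:

#include <cstdio>

// Stand-ins for the object kinds a feedback slot can hold; the real code
// distinguishes them by map, not by enum.
enum class FeedbackKind { kWeakCell, kFixedArray, kMegamorphicSymbol, kName };

static void DispatchLoad(FeedbackKind slot, bool is_keyed) {
  switch (slot) {
    case FeedbackKind::kWeakCell:
      std::puts("monomorphic: compare the cached map, tail-jump to handler");
      break;
    case FeedbackKind::kFixedArray:
      std::puts("polymorphic: loop over the {map, handler} pairs");
      break;
    case FeedbackKind::kMegamorphicSymbol:
      std::puts(is_keyed ? "generic: jump to the megamorphic keyed stub"
                         : "megamorphic: probe the stub cache");
      break;
    case FeedbackKind::kName:
      std::puts("keyed load by name: the pair array is in the next slot");
      break;
  }
}

int main() {
  DispatchLoad(FeedbackKind::kWeakCell, false);
  DispatchLoad(FeedbackKind::kMegamorphicSymbol, true);
  return 0;
}
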
@@ -4875,7 +5119,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ mov(eax, return_value_operand);
Label promote_scheduled_exception;
- Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -4887,7 +5130,17 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Assert(above_equal, kInvalidHandleScopeLevel);
__ cmp(edi, Operand::StaticVariable(limit_address));
__ j(not_equal, &delete_allocated_handles);
+
+ // Leave the API exit frame.
__ bind(&leave_exit_frame);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ mov(esi, *context_restore_operand);
+ }
+ if (stack_space_operand != nullptr) {
+ __ mov(ebx, *stack_space_operand);
+ }
+ __ LeaveApiExitFrame(!restore_context);
// Check if the function scheduled an exception.
ExternalReference scheduled_exception_address =
@@ -4895,7 +5148,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate->factory()->the_hole_value()));
__ j(not_equal, &promote_scheduled_exception);
- __ bind(&exception_handled);
#if DEBUG
// Check if the function returned a valid JavaScript value.
@@ -4932,14 +5184,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&ok);
#endif
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ mov(esi, *context_restore_operand);
- }
- if (stack_space_operand != nullptr) {
- __ mov(ebx, *stack_space_operand);
- }
- __ LeaveApiExitFrame(!restore_context);
if (stack_space_operand != nullptr) {
DCHECK_EQ(0, stack_space);
__ pop(ecx);
@@ -4949,12 +5193,9 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ ret(stack_space * kPointerSize);
}
+ // Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kPromoteScheduledException, 0);
- }
- __ jmp(&exception_handled);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
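
The reworked CEntryStub no longer unwinds the handler chain itself: Runtime::kFindExceptionHandler stores the handler's context, code object, code offset, FP and SP in per-isolate slots, and the stub merely reloads esi/esp/ebp and jumps. The only subtle arithmetic is the closing lea, sketched below with illustrative constants:

#include <cstdint>
#include <cstdio>

constexpr std::uintptr_t kHeapObjectTag = 1;    // tagged-pointer low bit
constexpr std::uintptr_t kCodeHeaderSize = 32;  // stand-in for Code::kHeaderSize

// Mirrors the closing lea/jmp pair: the pending-handler Code pointer is a
// tagged heap pointer, so its first instruction sits at code + header size
// minus the tag, and the handler's code offset is added on top.
static std::uintptr_t HandlerEntry(std::uintptr_t tagged_code,
                                   std::uintptr_t handler_offset) {
  return tagged_code + handler_offset + kCodeHeaderSize - kHeapObjectTag;
}

int main() {
  std::printf("%#lx\n",
              static_cast<unsigned long>(HandlerEntry(0x1001, 0x40)));
  return 0;
}
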
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 34b33b2c17..6d1c0f6384 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -13,60 +13,61 @@
namespace v8 {
namespace internal {
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void PatchCodeWithCall(Address pc, Address target, int guard_bytes) {
+ // Call instruction takes up 5 bytes and int3 takes up one byte.
+ static const int kCallCodeSize = 5;
+ int code_size = kCallCodeSize + guard_bytes;
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
-// for the precise return instructions sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- DCHECK(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
+ // Create a code patcher.
+ CodePatcher patcher(pc, code_size);
+// Add a label for checking the size of the emitted call sequence.
+#ifdef DEBUG
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+#endif
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
+ // Patch the code.
+ patcher.masm()->call(target, RelocInfo::NONE32);
+ // Check that the size of the code generated is as expected.
+ DCHECK_EQ(kCallCodeSize,
+ patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
+ // Add the requested number of int3 instructions after the call.
+ DCHECK_GE(guard_bytes, 0);
+ for (int i = 0; i < guard_bytes; i++) {
+ patcher.masm()->int3();
+ }
+
+ CpuFeatures::FlushICache(pc, code_size);
}
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
+// Patch the JS frame exit code with a debug break call. See
+// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
+// for the precise return instructions sequence.
+void BreakLocation::SetDebugBreakAtReturn() {
+ DCHECK(Assembler::kJSReturnSequenceLength >=
+ Assembler::kCallInstructionLength);
+ PatchCodeWithCall(
+ pc(), debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
+ Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
-void BreakLocationIterator::SetDebugBreakAtSlot() {
+void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
Isolate* isolate = debug_info_->GetIsolate();
- rinfo()->PatchCodeWithCall(
- isolate->builtins()->Slot_DebugBreak()->entry(),
+ PatchCodeWithCall(
+ pc(), isolate->builtins()->Slot_DebugBreak()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
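
PatchCodeWithCall above overwrites five bytes at the break location with a near call and pads the remainder of the patched sequence with int3. A self-contained sketch of the byte-level patching, minus the icache flush and CodePatcher plumbing:

#include <cstdint>
#include <cstring>

// Writes "call rel32" plus guard int3 bytes into a buffer. An ia32 near call
// is E8 followed by a displacement measured from the end of the 5-byte
// instruction.
static void PatchCallAt(std::uint8_t* pc, std::uint8_t* target,
                        int guard_bytes) {
  const int kCallCodeSize = 5;
  std::int32_t rel = static_cast<std::int32_t>(target - (pc + kCallCodeSize));
  pc[0] = 0xE8;  // call rel32
  std::memcpy(pc + 1, &rel, sizeof(rel));
  for (int i = 0; i < guard_bytes; i++) pc[kCallCodeSize + i] = 0xCC;  // int3
}

int main() {
  std::uint8_t buf[16] = {0};
  PatchCallAt(buf, buf + 11, 2);  // displacement 6, then two int3 bytes
  return buf[0] == 0xE8 && buf[5] == 0xCC && buf[6] == 0xCC ? 0 : 1;
}
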
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 27e308d298..5fbee322b6 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -228,7 +228,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
#define __ masm()->
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Save all general purpose registers before messing with them.
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 576c7393cc..f1fba341c6 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -826,11 +826,21 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5d:
+ AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5e:
AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5f:
+ AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
@@ -1554,6 +1564,20 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x62) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("punpckldq %s,%s", NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
+ } else if (*data == 0x6A) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("punpckhdq %s,%s", NameOfXMMRegister(regop),
+ NameOfXMMRegister(rm));
+ data++;
} else if (*data == 0x76) {
data++;
int mod, regop, rm;
@@ -1744,7 +1768,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x58: mnem = "addsd"; break;
case 0x59: mnem = "mulsd"; break;
case 0x5C: mnem = "subsd"; break;
+ case 0x5D:
+ mnem = "minsd";
+ break;
case 0x5E: mnem = "divsd"; break;
+ case 0x5F:
+ mnem = "maxsd";
+ break;
}
data += 3;
int mod, regop, rm;
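
The new 0x5D/0x5F and 0x62/0x6A cases all lean on get_modrm to split the byte following the opcode. A small sketch of that decode, with an invented helper name:

#include <cstdio>

// The ModR/M byte packs three fields: mod (top two bits), reg (middle
// three) and r/m (low three).
static void GetModRM(unsigned char data, int* mod, int* regop, int* rm) {
  *mod = (data >> 6) & 3;
  *regop = (data >> 3) & 7;
  *rm = data & 7;
}

int main() {
  int mod, regop, rm;
  GetModRM(0xC1, &mod, &regop, &rm);  // e.g. the byte after F2 0F 5F
  std::printf("mod=%d reg=%d rm=%d\n", mod, regop, rm);  // mod=3 reg=0 rm=1
  return 0;
}
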
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index 1290ad6e09..f9d804f667 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -115,11 +115,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-inline void StackHandler::SetFp(Address slot, Address fp) {
- Memory::Address_at(slot) = fp;
-}
-
-
} } // namespace v8::internal
#endif // V8_IA32_FRAMES_IA32_H_
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 8596705512..1e578b4141 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -95,7 +95,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
@@ -188,7 +189,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
- if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ if (info->scope()->is_script_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
@@ -236,6 +237,11 @@ void FullCodeGenerator::Generate() {
}
}
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@@ -244,6 +250,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
+ if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
+ --num_parameters;
+ ++rest_index;
+ }
+
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
@@ -284,10 +295,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+
ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
@@ -1453,7 +1461,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
- CallLoadIC(CONTEXTUAL);
+ CallGlobalLoadIC(var->name());
context()->Plug(eax);
break;
}
@@ -2098,7 +2106,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ mov(load_name, isolate()->factory()->throw_string()); // "throw"
__ push(load_name); // "throw"
__ push(Operand(esp, 2 * kPointerSize)); // iter
@@ -2110,16 +2117,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(eax); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
+ EnterTryBlock(expr->index(), &l_catch);
+ const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(eax); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
+ const int generator_object_depth = kPointerSize + try_block_size;
__ mov(eax, Operand(esp, generator_object_depth));
__ push(eax); // g
+ __ push(Immediate(Smi::FromInt(expr->index()))); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(l_continuation.pos())));
@@ -2127,13 +2135,13 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(ecx, esi);
__ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(eax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in eax
- __ PopTryHandler();
+ ExitTryBlock(expr->index());
// receiver = iter; f = iter.next; arg = received;
__ bind(&l_next);
@@ -2479,6 +2487,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ push(Operand(esp, 0)); // prototype
}
EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+ // The static prototype property is read only. We handle the non computed
+ // property name case in the parser. Since this is the only case where we
+ // need to check for an own read only property we special case this so we do
+ // not need to do this for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ push(eax);
+ }
+
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@@ -2616,25 +2634,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(eax);
- __ push(esi);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip, Label::kNear);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2648,6 +2647,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &const_error, Label::kNear);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2669,8 +2683,33 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(var->mode() == CONST_LEGACY);
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(eax);
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip, Label::kNear);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ }
+ // Silently ignore store in sloppy mode.
}
}
@@ -3138,8 +3177,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- if (!ValidateSuperCall(expr)) return;
-
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(eax, new_target_var);
__ push(eax);
@@ -3651,8 +3688,8 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ GetMapConstructor(eax, eax, ebx);
+ __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
__ j(not_equal, &non_function_constructor);
// eax now contains the constructor function. Grab the
@@ -3955,7 +3992,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4003,7 +4040,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4175,7 +4212,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// Call runtime to perform the lookup.
__ push(cache);
__ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ bind(&done);
context()->Plug(eax);
@@ -4495,17 +4532,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- if (expr->function() != NULL &&
- expr->function()->intrinsic_type == Runtime::INLINE) {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as receiver.
__ mov(eax, GlobalObjectOperand());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
@@ -4525,9 +4556,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
+ // Push the arguments ("left-to-right").
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@@ -4537,21 +4566,33 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
__ mov(edi, Operand(esp, (arg_count + 1) * kPointerSize));
__ CallStub(&stub);
+
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, eax);
} else {
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- context()->Plug(eax);
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(eax);
+ }
+ }
}
}
@@ -5203,17 +5244,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(edx, Operand::StaticVariable(pending_message_obj));
__ push(edx);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(edx, Operand::StaticVariable(has_pending_message));
- __ SmiTag(edx);
- __ push(edx);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(edx, Operand::StaticVariable(pending_message_script));
- __ push(edx);
}
@@ -5221,17 +5251,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(edx));
// Restore pending message from stack.
__ pop(edx);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(Operand::StaticVariable(pending_message_script), edx);
-
- __ pop(edx);
- __ SmiUntag(edx);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(Operand::StaticVariable(has_pending_message), edx);
-
- __ pop(edx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(Operand::StaticVariable(pending_message_obj), edx);
@@ -5249,33 +5268,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset));
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- }
- __ PopTryHandler();
- __ call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-#undef __
-
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x11;
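
The VisitCallRuntime rewrite above replaces the old EmitInlineRuntimeCall test with an X-macro switch: FOR_EACH_FULL_CODE_INTRINSIC expands one case per inlinable intrinsic, each forwarding to its Emit##Name generator, and everything else falls through to a generic runtime call. A toy version of the pattern, with invented intrinsic names:

#include <cstdio>

#define FOR_EACH_DEMO_INTRINSIC(V) V(IsSmi) V(ClassOf)

enum FunctionId { kInlineIsSmi, kInlineClassOf, kSomethingElse };

static void EmitIsSmi() { std::puts("inline IsSmi"); }
static void EmitClassOf() { std::puts("inline ClassOf"); }

static void Dispatch(FunctionId id) {
  switch (id) {
// One case per intrinsic; each returns after calling its emitter.
#define CASE(Name)      \
  case kInline##Name: { \
    Emit##Name();       \
    return;             \
  }
    FOR_EACH_DEMO_INTRINSIC(CASE)
#undef CASE
    default:
      std::puts("generic runtime call");
  }
}

int main() {
  Dispatch(kInlineIsSmi);
  Dispatch(kSomethingElse);
  return 0;
}
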
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index b0e57fc2e2..407b1c7bc8 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -228,6 +228,12 @@ void InternalArrayConstructorDescriptor::Initialize(
}
+void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {esi, eax};
data->Initialize(arraysize(registers), registers, NULL);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index d750cb87d5..f0cd7b0008 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -10,6 +10,7 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ia32/lithium-codegen-ia32.h"
@@ -140,7 +141,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
+ if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
// +1 for return address.
@@ -376,10 +377,11 @@ void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
bool LCodeGen::GenerateJumpTable() {
+ if (!jump_table_.length()) return !is_aborted();
+
Label needs_frame;
- if (jump_table_.length() > 0) {
- Comment(";;; -------------------- Jump table --------------------");
- }
+ Comment(";;; -------------------- Jump table --------------------");
+
for (int i = 0; i < jump_table_.length(); i++) {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
@@ -388,34 +390,55 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- if (needs_frame.is_bound()) {
- __ jmp(&needs_frame);
- } else {
- __ bind(&needs_frame);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push a PC inside the function so that the deopt code can find where
- // the deopt comes from. It doesn't have to be the precise return
- // address of a "calling" LAZY deopt, it only has to be somewhere
- // inside the code body.
- Label push_approx_pc;
- __ call(&push_approx_pc);
- __ bind(&push_approx_pc);
- // Push the continuation which was stashed were the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 3 * kPointerSize));
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
+ __ call(&needs_frame);
} else {
if (info()->saves_caller_doubles()) RestoreCallerDoubles();
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
+ }
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+ /* stack layout
+ 4: entry address
+ 3: return address <-- esp
+ 2: garbage
+ 1: garbage
+ 0: garbage
+ */
+ __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker.
+ __ push(MemOperand(esp, kPointerSize)); // Copy return address.
+ __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address.
+
+ /* stack layout
+ 4: entry address
+ 3: return address
+ 2: garbage
+ 1: return address
+ 0: entry address <-- esp
+ */
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp.
+ // Copy context.
+ __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ // Fill ebp with the right stack frame address.
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ mov(MemOperand(esp, 2 * kPointerSize),
+ Immediate(Smi::FromInt(StackFrame::STUB)));
+
+ /* stack layout
+ 4: old ebp
+ 3: context pointer
+ 2: stub marker
+ 1: return address
+ 0: entry address <-- esp
+ */
+ __ ret(0); // Call the continuation without clobbering registers.
}
return !is_aborted();
}
@@ -861,12 +884,13 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
@@ -2580,9 +2604,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+ __ GetMapConstructor(temp, temp, temp2);
// Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
+ __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ j(not_equal, is_true);
} else {
@@ -2839,16 +2863,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
- }
-}
-
-
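
The fast path deleted here (DoLoadGlobalCell, with DoStoreGlobalCell removed
below) had simple semantics: deleting a global property leaves the hole in
its cell, and optimized code that still reads the cell must deoptimize. A
hedged standalone sketch of that contract (Cell and kHole are stand-ins,
not V8 types):

    #include <stdexcept>

    struct Cell { const void* value; };
    static const void* const kHole = &kHole;  // sentinel for "deleted"

    // Load a global's cell; bail out (deopt) if the property was deleted
    // from the global object's dictionary since the code was compiled.
    const void* LoadGlobalCell(const Cell& cell, bool requires_hole_check) {
      if (requires_hole_check && cell.value == kHole)
        throw std::runtime_error("deopt: Deoptimizer::kHole");
      return cell.value;
    }
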
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@@ -2878,30 +2892,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
- }
-
- // Store the value.
- __ mov(Operand::ForCell(cell_handle), value);
- // Cells are always rescanned, so no write barrier here.
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3014,8 +3010,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL,
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3241,7 +3238,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3693,7 +3692,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ bind(&non_zero);
}
- __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+ __ roundsd(xmm_scratch, input_reg, kRoundDown);
__ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x1);
@@ -3915,14 +3914,8 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
void LCodeGen::DoMathClz32(LMathClz32* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Label not_zero_input;
- __ bsr(result, input);
- __ j(not_zero, &not_zero_input);
- __ Move(result, Immediate(63)); // 63^31 == 32
-
- __ bind(&not_zero_input);
- __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+ __ Lzcnt(result, input);
}
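
The removed sequence relied on two facts: the zero-input case (where bsr is
undefined) is patched by loading 63, since 63 ^ 31 == 32, and for x in
[0, 31] the identity 31 ^ x == 31 - x holds. A plain C++ restatement of
what the new Lzcnt macro-instruction computes (illustrative, not V8 code):

    #include <cstdint>

    // Count leading zeros of a 32-bit value the way the old sequence did:
    // find the index of the highest set bit, then flip it against 31.
    uint32_t Clz32(uint32_t x) {
      if (x == 0) return 63 ^ 31;        // == 32; bsr is undefined on zero
      uint32_t bsr = 31;
      while (!(x & (1u << bsr))) --bsr;  // highest set bit index
      return bsr ^ 31;                   // 31 ^ bsr == 31 - bsr here
    }
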
@@ -4173,7 +4166,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
+ Handle<Code> ic =
+ StoreIC::initialize_stub(isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4350,8 +4345,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 1c8d075dcb..0bda914f6d 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -2132,14 +2132,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* global_object =
@@ -2155,13 +2147,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LStoreGlobalCell* result =
- new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 3f591705d2..4ea444a665 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -103,7 +103,6 @@ class LCodeGen;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@@ -144,7 +143,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1718,13 +1716,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@@ -1746,19 +1737,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreGlobalCell(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 53ffa39357..c243ca26d6 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -14,7 +14,6 @@
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -1023,44 +1022,21 @@ void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
}
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
+void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // We will build up the handler from the bottom by pushing on the stack.
- // First push the frame pointer and context.
- if (kind == StackHandler::JS_ENTRY) {
- // The frame pointer does not point to a JS frame so we save NULL for
- // ebp. We expect the code throwing an exception to check ebp before
- // dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
- push(Immediate(Smi::FromInt(0))); // No context.
- } else {
- push(ebp);
- push(esi);
- }
- // Push the state and the code object.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- push(Immediate(state));
- Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
push(Operand::StaticVariable(handler_address));
+
// Set this new handler as the current one.
mov(Operand::StaticVariable(handler_address), esp);
}
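
After this change a stack handler is a single word: the link to the next
handler. Pushing one is just a linked-list insert at the chain head, which
is all the two remaining instructions do with the thread's handler address.
A minimal C++ model (types are illustrative):

    struct StackHandler { StackHandler* next; };

    // Link the new handler in front of the current chain head.
    void PushStackHandler(StackHandler** top_of_chain, StackHandler* h) {
      h->next = *top_of_chain;  // link current handler as the next handler
      *top_of_chain = h;        // the new handler becomes the current one
    }
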
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
pop(Operand::StaticVariable(handler_address));
@@ -1068,103 +1044,6 @@ void MacroAssembler::PopTryHandler() {
}
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // eax = exception, edi = code object, edx = state.
- mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
- shr(edx, StackHandler::kKindWidth);
- mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
- SmiUntag(edx);
- lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
- jmp(edi);
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in eax.
- if (!value.is(eax)) {
- mov(eax, value);
- }
- // Drop the stack pointer to the top of the top handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- mov(esp, Operand::StaticVariable(handler_address));
- // Restore the next handler.
- pop(Operand::StaticVariable(handler_address));
-
- // Remove the code object and state, compute the handler address in edi.
- pop(edi); // Code object.
- pop(edx); // Index and state.
-
- // Restore the context and frame pointer.
- pop(esi); // Context.
- pop(ebp); // Frame pointer.
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
- // ebp or esi.
- Label skip;
- test(esi, esi);
- j(zero, &skip, Label::kNear);
- mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- bind(&skip);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in eax.
- if (!value.is(eax)) {
- mov(eax, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the top ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind, Label::kNear);
- bind(&fetch_next);
- mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- test(Operand(esp, StackHandlerConstants::kStateOffset),
- Immediate(StackHandler::KindField::kMask));
- j(not_zero, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(Operand::StaticVariable(handler_address));
-
- // Remove the code object and state, compute the handler address in edi.
- pop(edi); // Code object.
- pop(edx); // Index and state.
-
- // Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(esi);
- pop(ebp);
-
- JumpToHandlerEntry();
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch1,
Register scratch2,
@@ -1919,6 +1798,20 @@ void MacroAssembler::NegativeZeroTest(Register result,
}
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp) {
+ Label done, loop;
+ mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
+ bind(&loop);
+ JumpIfSmi(result, &done);
+ CmpObjectType(result, MAP_TYPE, temp);
+ j(not_equal, &done);
+ mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
+ jmp(&loop);
+ bind(&done);
+}
+
+
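
GetMapConstructor is the machine-code twin of Map::GetConstructor(): the
constructor-or-back-pointer field holds either the constructor or another
Map (a back pointer), so the loop chases Maps until it reaches a non-Map.
A hedged C++ model with stand-in types (the real loop also exits early
when the field holds a Smi):

    struct Obj {
      bool is_map;
      Obj* constructor_or_back_pointer;
    };

    // Follow back pointers (which are Maps) until a non-Map remains; that
    // is the constructor, mirroring the assembly loop bound at &loop.
    Obj* GetMapConstructor(Obj* map) {
      Obj* result = map->constructor_or_back_pointer;
      while (result->is_map) result = result->constructor_or_back_pointer;
      return result;
    }
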
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -1970,7 +1863,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- mov(result, FieldOperand(result, Map::kConstructorOffset));
+ GetMapConstructor(result, result, scratch);
}
// All done.
@@ -2524,6 +2417,52 @@ void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
}
+void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
+ if (imm8 == 0) {
+ movd(dst, src);
+ return;
+ }
+ DCHECK_EQ(1, imm8);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrd(dst, src, imm8);
+ return;
+ }
+ pshufd(xmm0, src, 1);
+ movd(dst, xmm0);
+}
+
+
+void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+ DCHECK(imm8 == 0 || imm8 == 1);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrd(dst, src, imm8);
+ return;
+ }
+ movd(xmm0, src);
+ if (imm8 == 1) {
+ punpckldq(dst, xmm0);
+ } else {
+ DCHECK_EQ(0, imm8);
+ psrlq(dst, 32);
+ punpckldq(xmm0, dst);
+ movaps(dst, xmm0);
+ }
+}
+
+
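
Without SSE4.1, Pextrd recovers lane 1 by shuffling it into lane 0 with
pshufd before the movd; lane 0 needs only the movd itself. A scalar C++
model of the two paths over the low 64 bits of an XMM register
(illustrative):

    #include <cstdint>

    // Extract 32-bit lane 0 or 1, as the non-SSE4.1 fallback above does.
    uint32_t Pextrd(uint64_t xmm_low64, int imm8) {
      if (imm8 == 0)
        return static_cast<uint32_t>(xmm_low64);        // movd
      return static_cast<uint32_t>(xmm_low64 >> 32);    // pshufd + movd
    }
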
+void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
+ // TODO(intel): Add support for LZCNT (with ABM/BMI1).
+ Label not_zero_src;
+ bsr(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Move(dst, Immediate(63)); // 63^31 == 32
+ bind(&not_zero_src);
+ xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index e62c7d8b4d..0be458551b 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -570,17 +570,11 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link it into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link it into the stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Throw to the top handler in the try handler chain.
- void Throw(Register value);
-
- // Throw past all JS frames to the top JS entry frame.
- void ThrowUncatchable(Register value);
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -718,6 +712,10 @@ class MacroAssembler: public Assembler {
void NegativeZeroTest(Register result, Register op1, Register op2,
Register scratch, Label* then_label);
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done.
+ void GetMapConstructor(Register result, Register map, Register temp);
+
// Try to get the prototype of a function and put the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
@@ -815,6 +813,16 @@ class MacroAssembler: public Assembler {
void Push(Register src) { push(src); }
void Pop(Register dst) { pop(dst); }
+ // Non-SSE2 instructions.
+ void Pextrd(Register dst, XMMRegister src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
+ Pinsrd(dst, Operand(src), imm8);
+ }
+ void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+
+ void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
+ void Lzcnt(Register dst, const Operand& src);
+
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
@@ -1005,10 +1013,6 @@ class MacroAssembler: public Assembler {
Register bitmap_reg,
Register mask_reg);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index bacf44dbe4..7f857ca5d9 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -197,11 +197,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
- __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ b(ne, miss);
@@ -428,6 +428,17 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -462,23 +473,14 @@ Register PropertyHandlerCompiler::CheckPrototypes(
} else {
Register map_reg = scratch1;
__ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ b(ne, miss);
- }
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
+ } else if (depth != 1 || check == CHECK_ALL_MAPS) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ b(ne, miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -491,6 +493,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current_map = handle(current->map());
}
+ DCHECK(!current_map->IsJSGlobalProxyMap());
+
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -502,13 +506,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ b(ne, miss);
}
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
// Return the register containing the holder.
return reg;
}
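
The same reordering lands in every architecture's CheckPrototypes in this
commit: the global-proxy access check moves out of the per-prototype loop
and runs once against the receiver map, which is what lets the walk assert
!current_map->IsJSGlobalProxyMap() afterwards. In outline (a compilable
sketch with stand-in types; the depth/CHECK_ALL_MAPS conditions are
elided):

    struct Map {
      bool is_global_proxy;
      bool is_global_object;
      Map* prototype_map;
    };

    void CheckPrototypes(Map* receiver_map, Map* holder_map) {
      if (receiver_map->is_global_proxy) {
        // CheckAccessGlobalProxy(...): moved before the loop.
      }
      for (Map* m = receiver_map; m != holder_map; m = m->prototype_map) {
        if (m->is_global_object) {
          // GenerateCheckPropertyCell(...): cell must still hold the hole.
        } else {
          // CmpWeakValue(map_reg, weak_cell) -> jump to miss on mismatch.
        }
      }
    }
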
@@ -731,7 +728,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Register result = StoreDescriptor::ValueRegister();
Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
__ LoadWeakValue(result, weak_cell, &miss);
- __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (is_configurable) {
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 12a2401294..36d88c0a2c 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -109,11 +109,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
- __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
}
@@ -345,7 +345,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Register result = StoreDescriptor::ValueRegister();
Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
__ LoadWeakValue(result, weak_cell, &miss);
- __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (is_configurable) {
@@ -477,6 +477,18 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ UseScratchRegisterScope temps(masm());
+ __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
+ }
+
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -511,26 +523,15 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register map_reg = scratch1;
__ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (depth != 1 || check == CHECK_ALL_MAPS) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (depth != 1 || check == CHECK_ALL_MAPS) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ B(ne, miss);
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- UseScratchRegisterScope temps(masm());
- __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- }
-
reg = holder_reg; // From now on the object will be in holder_reg.
__ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
@@ -541,6 +542,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current_map = handle(current->map());
}
+ DCHECK(!current_map->IsJSGlobalProxyMap());
+
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -553,13 +556,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ B(ne, miss);
}
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
// Return the register containing the holder.
return reg;
}
diff --git a/deps/v8/src/ic/handler-compiler.cc b/deps/v8/src/ic/handler-compiler.cc
index acf380fbee..f103f8daf1 100644
--- a/deps/v8/src/ic/handler-compiler.cc
+++ b/deps/v8/src/ic/handler-compiler.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/cpu-profiler.h"
#include "src/ic/call-optimization.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@@ -73,7 +74,7 @@ Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
Handle<Name> name) {
Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
Handle<Code> code = GetCodeWithFlags(flags, name);
- PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name));
+ PROFILE(isolate(), CodeCreateEvent(Logger::HANDLER_TAG, *code, *name));
#ifdef DEBUG
code->VerifyEmbeddedObjects();
#endif
@@ -279,6 +280,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
case LookupIterator::INTERCEPTOR:
case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
break;
case LookupIterator::DATA:
inline_followup =
@@ -310,10 +312,36 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
Label miss;
InterceptorVectorSlotPush(receiver());
+ bool lost_holder_register = false;
+ auto holder_orig = holder();
+ // non masking interceptors must check the entire chain, so temporarily reset
+ // the holder to be that last element for the FrontendHeader call.
+ if (holder()->GetNamedInterceptor()->non_masking()) {
+ DCHECK(!inline_followup);
+ JSObject* last = *holder();
+ PrototypeIterator iter(isolate(), last);
+ while (!iter.IsAtEnd()) {
+ lost_holder_register = true;
+ last = JSObject::cast(iter.GetCurrent());
+ iter.Advance();
+ }
+ auto last_handle = handle(last);
+ set_holder(last_handle);
+ }
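
The walk just added finds the last object on the prototype chain so that
the FrontendHeader check covers the whole chain for a non-masking
interceptor. Its shape, as standalone C++ with stand-in types:

    struct JSObject { JSObject* prototype; };  // nullptr ends the chain

    // Return the final prototype, which temporarily stands in as the
    // holder for the FrontendHeader call above.
    JSObject* LastOnChain(JSObject* holder) {
      JSObject* last = holder;
      while (last->prototype != nullptr) last = last->prototype;
      return last;
    }
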
Register reg = FrontendHeader(receiver(), it->name(), &miss);
+ // Reset the holder so further calculations are correct.
+ set_holder(holder_orig);
+ if (lost_holder_register) {
+ if (*it->GetReceiver() == *holder()) {
+ reg = receiver();
+ } else {
+ // Reload lost holder register.
+ auto cell = isolate()->factory()->NewWeakCell(holder());
+ __ LoadWeakValue(reg, cell, &miss);
+ }
+ }
FrontendFooter(it->name(), &miss);
InterceptorVectorSlotPop(reg);
-
if (inline_followup) {
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
@@ -345,6 +373,7 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
case LookupIterator::INTERCEPTOR:
case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::DATA: {
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 94b48be274..c02d83ce46 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -431,6 +431,17 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -465,26 +476,15 @@ Register PropertyHandlerCompiler::CheckPrototypes(
} else {
Register map_reg = scratch1;
__ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
- if (depth != 1 || check == CHECK_ALL_MAPS) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (depth != 1 || check == CHECK_ALL_MAPS) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ j(not_equal, miss);
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, map_reg, scratch2, miss);
- // Restore map_reg.
- __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- }
reg = holder_reg; // From now on the object will be in holder_reg.
__ mov(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
}
@@ -494,6 +494,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current_map = handle(current->map());
}
+ DCHECK(!current_map->IsJSGlobalProxyMap());
+
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -505,13 +507,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
// Return the register containing the holder.
return reg;
}
diff --git a/deps/v8/src/ic/ic-compiler.cc b/deps/v8/src/ic/ic-compiler.cc
index 08e0fa6e5a..f597083e63 100644
--- a/deps/v8/src/ic/ic-compiler.cc
+++ b/deps/v8/src/ic/ic-compiler.cc
@@ -4,6 +4,7 @@
#include "src/v8.h"
+#include "src/cpu-profiler.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic-inl.h"
#include "src/ic/ic-compiler.h"
@@ -196,6 +197,8 @@ Handle<Code> PropertyICCompiler::ComputeLoad(Isolate* isolate,
code = compiler.CompileLoadInitialize(flags);
} else if (ic_state == PREMONOMORPHIC) {
code = compiler.CompileLoadPreMonomorphic(flags);
+ } else if (ic_state == MEGAMORPHIC) {
+ code = compiler.CompileLoadMegamorphic(flags);
} else {
UNREACHABLE();
}
@@ -333,6 +336,14 @@ Handle<Code> PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
}
+Handle<Code> PropertyICCompiler::CompileLoadMegamorphic(Code::Flags flags) {
+ MegamorphicLoadStub stub(isolate(), LoadICState(extra_ic_state_));
+ auto code = stub.GetCode();
+ PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
+ return code;
+}
+
+
Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
StoreIC::GenerateInitialize(masm());
Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
diff --git a/deps/v8/src/ic/ic-compiler.h b/deps/v8/src/ic/ic-compiler.h
index d1bd7a1dfc..a6c4e81ab6 100644
--- a/deps/v8/src/ic/ic-compiler.h
+++ b/deps/v8/src/ic/ic-compiler.h
@@ -70,6 +70,7 @@ class PropertyICCompiler : public PropertyAccessCompiler {
Handle<Code> CompileLoadInitialize(Code::Flags flags);
Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
+ Handle<Code> CompileLoadMegamorphic(Code::Flags flags);
Handle<Code> CompileStoreInitialize(Code::Flags flags);
Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
Handle<Code> CompileStoreGeneric(Code::Flags flags);
diff --git a/deps/v8/src/ic/ic-state.cc b/deps/v8/src/ic/ic-state.cc
index a38a27a34a..13c8e64216 100644
--- a/deps/v8/src/ic/ic-state.cc
+++ b/deps/v8/src/ic/ic-state.cc
@@ -45,18 +45,17 @@ STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::LAST_TOKEN;
BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
- : isolate_(isolate) {
+ : fixed_right_arg_(
+ HasFixedRightArgField::decode(extra_ic_state)
+ ? Just(1 << FixedRightArgValueField::decode(extra_ic_state))
+ : Nothing<int>()),
+ isolate_(isolate) {
op_ =
static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
- fixed_right_arg_ =
- Maybe<int>(HasFixedRightArgField::decode(extra_ic_state),
- 1 << FixedRightArgValueField::decode(extra_ic_state));
left_kind_ = LeftKindField::decode(extra_ic_state);
- if (fixed_right_arg_.has_value) {
- right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
- } else {
- right_kind_ = RightKindField::decode(extra_ic_state);
- }
+ right_kind_ = fixed_right_arg_.IsJust()
+ ? (Smi::IsValid(fixed_right_arg_.FromJust()) ? SMI : INT32)
+ : RightKindField::decode(extra_ic_state);
result_kind_ = ResultKindField::decode(extra_ic_state);
DCHECK_LE(FIRST_TOKEN, op_);
DCHECK_LE(op_, LAST_TOKEN);
@@ -67,10 +66,10 @@ ExtraICState BinaryOpICState::GetExtraICState() const {
ExtraICState extra_ic_state =
OpField::encode(op_ - FIRST_TOKEN) | LeftKindField::encode(left_kind_) |
ResultKindField::encode(result_kind_) |
- HasFixedRightArgField::encode(fixed_right_arg_.has_value);
- if (fixed_right_arg_.has_value) {
+ HasFixedRightArgField::encode(fixed_right_arg_.IsJust());
+ if (fixed_right_arg_.IsJust()) {
extra_ic_state = FixedRightArgValueField::update(
- extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
+ extra_ic_state, WhichPowerOf2(fixed_right_arg_.FromJust()));
} else {
extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
}
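
This file migrates fixed_right_arg_ from the old two-field Maybe<int> (a
has_value flag plus a value) to the Just/Nothing API: construction becomes
Just(v) or Nothing<int>(), and the accessors become IsJust()/FromJust(). A
compilable sketch of the correspondence, using std::optional as a stand-in
for v8::Maybe:

    #include <optional>

    // Old: Maybe<int> m(has, 1 << shift); m.has_value; m.value
    // New: Just(1 << shift) / Nothing<int>(); m.IsJust(); m.FromJust()
    std::optional<int> FixedRightArg(bool has_fixed_right_arg, int shift) {
      if (has_fixed_right_arg) return 1 << shift;  // Just(...)
      return std::nullopt;                         // Nothing<int>()
    }
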
@@ -89,7 +88,7 @@ void BinaryOpICState::GenerateAheadOfTime(
do { \
BinaryOpICState state(isolate, op); \
state.left_kind_ = left_kind; \
- state.fixed_right_arg_.has_value = false; \
+ state.fixed_right_arg_ = Nothing<int>(); \
state.right_kind_ = right_kind; \
state.result_kind_ = result_kind; \
Generate(isolate, state); \
@@ -191,8 +190,7 @@ void BinaryOpICState::GenerateAheadOfTime(
do { \
BinaryOpICState state(isolate, op); \
state.left_kind_ = left_kind; \
- state.fixed_right_arg_.has_value = true; \
- state.fixed_right_arg_.value = fixed_right_arg_value; \
+ state.fixed_right_arg_ = Just(fixed_right_arg_value); \
state.right_kind_ = SMI; \
state.result_kind_ = result_kind; \
Generate(isolate, state); \
@@ -225,8 +223,8 @@ std::ostream& operator<<(std::ostream& os, const BinaryOpICState& s) {
os << "(" << Token::Name(s.op_);
if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
- if (s.fixed_right_arg_.has_value) {
- os << s.fixed_right_arg_.value;
+ if (s.fixed_right_arg_.IsJust()) {
+ os << s.fixed_right_arg_.FromJust();
} else {
os << BinaryOpICState::KindToString(s.right_kind_);
}
@@ -248,9 +246,9 @@ void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
base::bits::IsPowerOfTwo32(fixed_right_arg_value) &&
FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
(left_kind_ == SMI || left_kind_ == INT32) &&
- (result_kind_ == NONE || !fixed_right_arg_.has_value);
- fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg, fixed_right_arg_value);
-
+ (result_kind_ == NONE || !fixed_right_arg_.IsJust());
+ fixed_right_arg_ =
+ has_fixed_right_arg ? Just(fixed_right_arg_value) : Nothing<int32_t>();
result_kind_ = UpdateKind(result, result_kind_);
if (!Token::IsTruncatingBinaryOp(op_)) {
diff --git a/deps/v8/src/ic/ic-state.h b/deps/v8/src/ic/ic-state.h
index b5f58ed211..f35bac3522 100644
--- a/deps/v8/src/ic/ic-state.h
+++ b/deps/v8/src/ic/ic-state.h
@@ -63,6 +63,7 @@ class BinaryOpICState FINAL BASE_EMBEDDED {
left_kind_(NONE),
right_kind_(NONE),
result_kind_(NONE),
+ fixed_right_arg_(Nothing<int>()),
isolate_(isolate) {
DCHECK_LE(FIRST_TOKEN, op);
DCHECK_LE(op, LAST_TOKEN);
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index 93f33cf663..0ba80d3590 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -230,6 +230,14 @@ bool IC::AddressIsOptimizedCode() const {
}
+bool IC::AddressIsDeoptimizedCode() const {
+ Code* host =
+ isolate()->inner_pointer_to_code_cache()->GetCacheEntry(address())->code;
+ return host->kind() == Code::OPTIMIZED_FUNCTION &&
+ host->marked_for_deoptimization();
+}
+
+
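
AddressIsDeoptimizedCode backs the guards added further down in
KeyedStoreIC::Store and BinaryOpIC::Transition: an IC living in optimized
code that is already marked for deoptimization must not be re-patched,
because addresses in such code objects are not relocated. The pattern, as
a compilable sketch with illustrative types:

    struct Host { bool optimized; bool marked_for_deopt; };

    struct IC {
      Host* host;
      void set_target(const void* /*stub*/) { /* patch the call site */ }
      bool AddressIsDeoptimizedCode() const {
        return host->optimized && host->marked_for_deopt;
      }
    };

    // Patch the call site only when the surrounding code will keep running.
    void MaybeSetTarget(IC* ic, const void* stub) {
      if (!ic->AddressIsDeoptimizedCode()) ic->set_target(stub);
    }
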
static void LookupForRead(LookupIterator* it) {
for (; it->IsFound(); it->Next()) {
switch (it->state()) {
@@ -249,12 +257,12 @@ static void LookupForRead(LookupIterator* it) {
case LookupIterator::ACCESS_CHECK:
// PropertyHandlerCompiler::CheckPrototypes() knows how to emit
// access checks for global proxies.
- if (it->GetHolder<JSObject>()->IsJSGlobalProxy() &&
- it->HasAccess(v8::ACCESS_GET)) {
+ if (it->GetHolder<JSObject>()->IsJSGlobalProxy() && it->HasAccess()) {
break;
}
return;
case LookupIterator::ACCESSOR:
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::DATA:
return;
}
@@ -308,8 +316,7 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.state() == LookupIterator::ACCESS_CHECK) return false;
if (!it.IsFound()) return false;
- Handle<PropertyCell> cell = it.GetPropertyCell();
- return cell->type()->IsConstant();
+ return it.property_details().cell_type() == PropertyCellType::kConstant;
}
return true;
@@ -714,7 +721,7 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
- if (FLAG_harmony_scoping && object->IsGlobalObject() && name->IsString()) {
+ if (object->IsGlobalObject() && name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
@@ -942,12 +949,48 @@ Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
}
+Handle<Code> LoadIC::load_global(Isolate* isolate, Handle<GlobalObject> global,
+ Handle<String> name) {
+ // This special IC doesn't work with vector ics.
+ DCHECK(!FLAG_vector_ics);
+
+ Handle<ScriptContextTable> script_contexts(
+ global->native_context()->script_context_table());
+
+ ScriptContextTable::LookupResult lookup_result;
+ if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
+ return initialize_stub(isolate, LoadICState(CONTEXTUAL).GetExtraICState());
+ }
+
+ Handle<Map> global_map(global->map());
+ Handle<Code> handler = PropertyHandlerCompiler::Find(
+ name, global_map, Code::LOAD_IC, kCacheOnReceiver, Code::NORMAL);
+ if (handler.is_null()) {
+ LookupIterator it(global, name);
+ if (!it.IsFound() || !it.GetHolder<JSObject>().is_identical_to(global) ||
+ it.state() != LookupIterator::DATA) {
+ return initialize_stub(isolate,
+ LoadICState(CONTEXTUAL).GetExtraICState());
+ }
+ NamedLoadHandlerCompiler compiler(isolate, global_map, global,
+ kCacheOnReceiver);
+ Handle<PropertyCell> cell = it.GetPropertyCell();
+ handler = compiler.CompileLoadGlobal(cell, name, it.IsConfigurable());
+ Map::UpdateCodeCache(global_map, name, handler);
+ }
+ return PropertyICCompiler::ComputeMonomorphic(
+ Code::LOAD_IC, name, handle(global->map()), handler,
+ LoadICState(CONTEXTUAL).GetExtraICState());
+}
+
+
Handle<Code> LoadIC::initialize_stub_in_optimized_code(
- Isolate* isolate, ExtraICState extra_state) {
+ Isolate* isolate, ExtraICState extra_state, State initialization_state) {
if (FLAG_vector_ics) {
- return VectorLoadStub(isolate, LoadICState(extra_state)).GetCode();
+ return VectorRawLoadStub(isolate, LoadICState(extra_state)).GetCode();
}
- return initialize_stub(isolate, extra_state);
+ return PropertyICCompiler::ComputeLoad(isolate, initialization_state,
+ extra_state);
}
@@ -960,11 +1003,45 @@ Handle<Code> KeyedLoadIC::initialize_stub(Isolate* isolate) {
}
-Handle<Code> KeyedLoadIC::initialize_stub_in_optimized_code(Isolate* isolate) {
+Handle<Code> KeyedLoadIC::initialize_stub_in_optimized_code(
+ Isolate* isolate, State initialization_state) {
if (FLAG_vector_ics) {
- return VectorKeyedLoadStub(isolate).GetCode();
+ return VectorRawKeyedLoadStub(isolate).GetCode();
+ }
+ switch (initialization_state) {
+ case UNINITIALIZED:
+ return isolate->builtins()->KeyedLoadIC_Initialize();
+ case PREMONOMORPHIC:
+ return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
+ case MEGAMORPHIC:
+ return isolate->builtins()->KeyedLoadIC_Megamorphic();
+ default:
+ UNREACHABLE();
+ }
+ return Handle<Code>();
+}
+
+
+Handle<Code> KeyedStoreIC::initialize_stub(Isolate* isolate,
+ LanguageMode language_mode,
+ State initialization_state) {
+ switch (initialization_state) {
+ case UNINITIALIZED:
+ return is_strict(language_mode)
+ ? isolate->builtins()->KeyedStoreIC_Initialize_Strict()
+ : isolate->builtins()->KeyedStoreIC_Initialize();
+ case PREMONOMORPHIC:
+ return is_strict(language_mode)
+ ? isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict()
+ : isolate->builtins()->KeyedStoreIC_PreMonomorphic();
+ case MEGAMORPHIC:
+ return is_strict(language_mode)
+ ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
+ : isolate->builtins()->KeyedStoreIC_Megamorphic();
+ default:
+ UNREACHABLE();
}
- return initialize_stub(isolate);
+ return Handle<Code>();
}
@@ -1239,6 +1316,8 @@ Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
lookup->GetConstantIndex());
}
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return slow_stub();
case LookupIterator::ACCESS_CHECK:
case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
@@ -1429,6 +1508,8 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
break;
case LookupIterator::ACCESSOR:
return !it->IsReadOnly();
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return false;
case LookupIterator::DATA: {
if (it->IsReadOnly()) return false;
Handle<JSObject> holder = it->GetHolder<JSObject>();
@@ -1453,7 +1534,6 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
}
}
- if (it->IsSpecialNumericIndex()) return false;
it->PrepareTransitionToDataProperty(value, NONE, store_mode);
return it->IsCacheableTransition();
}
@@ -1462,7 +1542,7 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Object> value,
JSReceiver::StoreFromKeyed store_mode) {
- if (FLAG_harmony_scoping && object->IsGlobalObject() && name->IsString()) {
+ if (object->IsGlobalObject() && name->IsString()) {
// Look up in script context table.
Handle<String> str_name = Handle<String>::cast(name);
Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
@@ -1474,7 +1554,16 @@ MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
Handle<Context> script_context = ScriptContextTable::GetContext(
script_contexts, lookup_result.context_index);
if (lookup_result.mode == CONST) {
- return TypeError("harmony_const_assign", object, name);
+ return TypeError("const_assign", object, name);
+ }
+
+ Handle<Object> previous_value =
+ FixedArray::get(script_context, lookup_result.slot_index);
+
+ if (*previous_value == *isolate()->factory()->the_hole_value()) {
+ // Do not install stubs and stay pre-monomorphic for
+ // uninitialized accesses.
+ return ReferenceError("not_defined", name);
}
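
The block just added is the store half of the temporal dead zone for
script-level let/const bindings: a slot that still holds the hole means
the declaration has not executed, so the store raises ReferenceError
rather than installing a stub. A hedged standalone sketch:

    #include <stdexcept>

    static const void* const kTheHole = &kTheHole;  // stand-in sentinel

    // Store into a script-context slot, refusing writes to uninitialized
    // (hole-valued) bindings, as the hunk above does.
    void StoreScriptContextSlot(const void** slot, const void* value) {
      if (*slot == kTheHole)
        throw std::runtime_error("ReferenceError: not_defined");
      *slot = value;
    }
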
if (FLAG_use_ic &&
@@ -1560,10 +1649,14 @@ Handle<Code> CallIC::initialize_stub_in_optimized_code(
Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
- LanguageMode language_mode) {
+ LanguageMode language_mode,
+ State initialization_state) {
+ DCHECK(initialization_state == UNINITIALIZED ||
+ initialization_state == PREMONOMORPHIC ||
+ initialization_state == MEGAMORPHIC);
ExtraICState extra_state = ComputeExtraICState(language_mode);
- Handle<Code> ic =
- PropertyICCompiler::ComputeStore(isolate, UNINITIALIZED, extra_state);
+ Handle<Code> ic = PropertyICCompiler::ComputeStore(
+ isolate, initialization_state, extra_state);
return ic;
}
@@ -1623,9 +1716,8 @@ void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
static Handle<Code> PropertyCellStoreHandler(
Isolate* isolate, Handle<JSObject> receiver, Handle<GlobalObject> holder,
- Handle<Name> name, Handle<PropertyCell> cell, Handle<Object> value) {
- auto union_type = PropertyCell::UpdatedType(cell, value);
- StoreGlobalStub stub(isolate, union_type->IsConstant(),
+ Handle<Name> name, Handle<PropertyCell> cell, PropertyCellType type) {
+ StoreGlobalStub stub(isolate, type == PropertyCellType::kConstant,
receiver->IsJSGlobalProxy());
auto code = stub.GetCodeCopyFromTemplate(holder, cell);
// TODO(verwaest): Move caching of these NORMAL stubs outside as well.
@@ -1648,10 +1740,14 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
case LookupIterator::TRANSITION: {
auto store_target = lookup->GetStoreTarget();
if (store_target->IsGlobalObject()) {
- auto cell = lookup->GetTransitionPropertyCell();
- return PropertyCellStoreHandler(
+ // TODO(dcarney): this currently just deopts. Use the transition cell.
+ auto cell = isolate()->factory()->NewPropertyCell();
+ cell->set_value(*value);
+ auto code = PropertyCellStoreHandler(
isolate(), store_target, Handle<GlobalObject>::cast(store_target),
- lookup->name(), cell, value);
+ lookup->name(), cell, PropertyCellType::kConstant);
+ cell->set_value(isolate()->heap()->the_hole_value());
+ return code;
}
Handle<Map> transition = lookup->transition_map();
// Currently not handled by CompileStoreTransition.
@@ -1722,9 +1818,11 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
DCHECK(holder.is_identical_to(receiver) ||
receiver->map()->prototype() == *holder);
auto cell = lookup->GetPropertyCell();
+ auto union_type = PropertyCell::UpdatedType(
+ cell, value, lookup->property_details());
return PropertyCellStoreHandler(isolate(), receiver,
Handle<GlobalObject>::cast(holder),
- lookup->name(), cell, value);
+ lookup->name(), cell, union_type);
}
DCHECK(holder.is_identical_to(receiver));
return isolate()->builtins()->StoreIC_Normal();
@@ -1754,6 +1852,7 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
break;
}
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::ACCESS_CHECK:
case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
@@ -2100,7 +2199,9 @@ MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub");
}
DCHECK(!stub.is_null());
- set_target(*stub);
+ if (!AddressIsDeoptimizedCode()) {
+ set_target(*stub);
+ }
TRACE_IC("StoreIC", key);
return store_handle;
@@ -2477,9 +2578,17 @@ MaybeHandle<Object> BinaryOpIC::Transition(
isolate(), result, Execution::Call(isolate(), function, left, 1, &right),
Object);
+ // Do not try to update the target if the code was marked for lazy
+ // deoptimization. (Since we do not relocate addresses in these
+ // code objects, an attempt to access the target could fail.)
+ if (AddressIsDeoptimizedCode()) {
+ return result;
+ }
+
// Execution::Call can execute arbitrary JavaScript, hence potentially
// update the state of this very IC, so we must update the stored state.
UpdateTarget();
+
// Compute the new state.
BinaryOpICState old_state(isolate(), target()->extra_ic_state());
state.Update(left, right, result);
@@ -2805,30 +2914,10 @@ RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
Handle<JSObject> holder =
args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor_info(holder->GetNamedInterceptor());
-
- if (name->IsSymbol() && !interceptor_info->can_intercept_symbols())
- return isolate->heap()->no_interceptor_result_sentinel();
-
- Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
- v8::GenericNamedPropertyGetterCallback getter =
- FUNCTION_CAST<v8::GenericNamedPropertyGetterCallback>(getter_address);
- DCHECK(getter != NULL);
-
- PropertyCallbackArguments callback_args(isolate, interceptor_info->data(),
- *receiver, *holder);
- {
- // Use the interceptor getter.
- v8::Handle<v8::Value> r =
- callback_args.Call(getter, v8::Utils::ToLocal(name));
- RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
- if (!r.IsEmpty()) {
- Handle<Object> result = v8::Utils::OpenHandle(*r);
- result->VerifyApiCallResultType();
- return *v8::Utils::OpenHandle(*r);
- }
- }
-
+ auto res = JSObject::GetPropertyWithInterceptor(holder, receiver, name);
+ RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+ Handle<Object> result;
+ if (res.ToHandle(&result)) return *result;
return isolate->heap()->no_interceptor_result_sentinel();
}
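
The rewrite above collapses the hand-rolled interceptor-callback plumbing
into JSObject::GetPropertyWithInterceptor plus the standard MaybeHandle
consumption pattern: take the value if one was produced, otherwise return
the sentinel so the caller falls back to a regular lookup. The pattern,
modeled with std::optional standing in for MaybeHandle:

    #include <optional>

    static const int kNoInterceptorResultSentinel = -1;

    // res.ToHandle(&result)-style consumption of a maybe-value.
    int LoadWithInterceptor(const std::optional<int>& res) {
      if (res.has_value()) return *res;     // interceptor produced a value
      return kNoInterceptorResultSentinel;  // fall back to normal lookup
    }
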
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 8c1c82eac7..4da6e7cecc 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -104,12 +104,12 @@ class IC {
static bool IsCleared(Code* code) {
InlineCacheState state = code->ic_state();
- return state == UNINITIALIZED || state == PREMONOMORPHIC;
+ return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
}
static bool IsCleared(FeedbackNexus* nexus) {
InlineCacheState state = nexus->StateFromFeedback();
- return state == UNINITIALIZED || state == PREMONOMORPHIC;
+ return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
}
static bool ICUseVector(Code::Kind kind) {
@@ -134,6 +134,7 @@ class IC {
Code* GetOriginalCode() const;
bool AddressIsOptimizedCode() const;
+ bool AddressIsDeoptimizedCode() const;
// Set the call-site target.
inline void set_target(Code* code);
@@ -396,7 +397,9 @@ class LoadIC : public IC {
static Handle<Code> initialize_stub(Isolate* isolate,
ExtraICState extra_state);
static Handle<Code> initialize_stub_in_optimized_code(
- Isolate* isolate, ExtraICState extra_state);
+ Isolate* isolate, ExtraICState extra_state, State initialization_state);
+ static Handle<Code> load_global(Isolate* isolate, Handle<GlobalObject> global,
+ Handle<String> name);
MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
Handle<Name> name);
@@ -481,7 +484,8 @@ class KeyedLoadIC : public LoadIC {
(1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
static Handle<Code> initialize_stub(Isolate* isolate);
- static Handle<Code> initialize_stub_in_optimized_code(Isolate* isolate);
+ static Handle<Code> initialize_stub_in_optimized_code(
+ Isolate* isolate, State initialization_state);
static Handle<Code> ChooseMegamorphicStub(Isolate* isolate);
static Handle<Code> pre_monomorphic_stub(Isolate* isolate);
@@ -539,7 +543,8 @@ class StoreIC : public IC {
LanguageMode language_mode);
static Handle<Code> initialize_stub(Isolate* isolate,
- LanguageMode language_mode);
+ LanguageMode language_mode,
+ State initialization_state);
MUST_USE_RESULT MaybeHandle<Object> Store(
Handle<Object> object, Handle<Name> name, Handle<Object> value,
@@ -631,6 +636,10 @@ class KeyedStoreIC : public StoreIC {
LanguageMode language_mode);
static void GenerateSloppyArguments(MacroAssembler* masm);
+ static Handle<Code> initialize_stub(Isolate* isolate,
+ LanguageMode language_mode,
+ State initialization_state);
+
protected:
virtual Handle<Code> pre_monomorphic_stub() const {
return pre_monomorphic_stub(isolate(), language_mode());
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 93106ea0e1..83f57bc0ca 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -193,11 +193,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
- __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ lw(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
}
@@ -418,6 +418,17 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -452,23 +463,14 @@ Register PropertyHandlerCompiler::CheckPrototypes(
} else {
Register map_reg = scratch1;
__ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch2, Operand(map_reg));
- }
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
+ } else if (depth != 1 || check == CHECK_ALL_MAPS) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ GetWeakValue(scratch2, cell);
+ __ Branch(miss, ne, scratch2, Operand(map_reg));
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -481,6 +483,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current_map = handle(current->map());
}
+ DCHECK(!current_map->IsJSGlobalProxyMap());
+
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -492,13 +496,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ Branch(miss, ne, scratch2, Operand(scratch1));
}
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
// Return the register containing the holder.
return reg;
}
@@ -721,7 +718,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Register result = StoreDescriptor::ValueRegister();
Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
__ LoadWeakValue(result, weak_cell, &miss);
- __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ lw(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (is_configurable) {
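The CheckPrototypes rewrite hoists the global-proxy access check out of the per-prototype loop: it now runs once on the receiver map before the walk, and the loop body only distinguishes global objects (property-cell check) from ordinary maps (weak-cell map compare), with the new DCHECK asserting no proxy map survives to the end of the chain. A simplified control-flow sketch (hypothetical stand-ins for the masm-emitted checks):

    struct Map {
      bool is_global_proxy = false;
      bool is_global_object = false;
      Map* prototype = nullptr;
    };

    // Stand-ins for the emitted checks; each 'false' means jump to miss.
    static bool CheckAccessGlobalProxy(const Map&) { return true; }
    static bool GlobalCellStillHole(const Map&) { return true; }
    static bool MapMatchesWeakCell(const Map&) { return true; }

    bool CheckPrototypes(Map* receiver_map, Map* holder_map) {
      // Access check happens exactly once, up front, on the receiver.
      if (receiver_map->is_global_proxy &&
          !CheckAccessGlobalProxy(*receiver_map)) return false;
      int depth = 0;
      for (Map* cur = receiver_map; cur != holder_map; cur = cur->prototype) {
        ++depth;
        if (cur->is_global_object) {
          if (!GlobalCellStillHole(*cur)) return false;   // miss
        } else if (depth != 1) {
          if (!MapMatchesWeakCell(*cur)) return false;    // miss
        }
      }
      return true;  // holder reached; its register is returned
    }

    int main() { Map m; return CheckPrototypes(&m, &m) ? 0 : 1; }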
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index a68a418fa2..90eecaaf0e 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -194,11 +194,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
- __ ld(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
}
@@ -419,6 +419,17 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -453,23 +464,14 @@ Register PropertyHandlerCompiler::CheckPrototypes(
} else {
Register map_reg = scratch1;
__ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ GetWeakValue(scratch2, cell);
- __ Branch(miss, ne, scratch2, Operand(map_reg));
- }
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
+ } else if (depth != 1 || check == CHECK_ALL_MAPS) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ GetWeakValue(scratch2, cell);
+ __ Branch(miss, ne, scratch2, Operand(map_reg));
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -482,6 +484,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current_map = handle(current->map());
}
+ DCHECK(!current_map->IsJSGlobalProxyMap());
+
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -493,13 +497,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ Branch(miss, ne, scratch2, Operand(scratch1));
}
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
// Return the register containing the holder.
return reg;
}
@@ -722,7 +719,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Register result = StoreDescriptor::ValueRegister();
Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
__ LoadWeakValue(result, weak_cell, &miss);
- __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (is_configurable) {
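Both MIPS ports now type the global lookup as Handle<PropertyCell> and read PropertyCell::kValueOffset instead of the plain Cell offset. The guard pattern itself is unchanged: dereference a weak cell, miss if the GC cleared it, then require the cell's value to still be the hole. The shape, with hypothetical types:

    #include <cstdio>

    struct PropertyCell { const void* value; };
    struct WeakCell { PropertyCell* target; };      // null once GC-cleared

    static const void* const kTheHole = &kTheHole;  // unique sentinel

    // True lets the fast path proceed; false means "branch to miss".
    bool GlobalStillUninitialized(const WeakCell& cell) {
      PropertyCell* pc = cell.target;
      if (pc == nullptr) return false;   // LoadWeakValue -> miss
      return pc->value == kTheHole;      // value must still be the hole
    }

    int main() {
      PropertyCell pc{kTheHole};
      WeakCell wc{&pc};
      std::printf("%d\n", GlobalStillUninitialized(wc));  // 1
    }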
diff --git a/deps/v8/src/ic/mips64/ic-mips64.cc b/deps/v8/src/ic/mips64/ic-mips64.cc
index 7b22b8c3e2..1ec49c45d4 100644
--- a/deps/v8/src/ic/mips64/ic-mips64.cc
+++ b/deps/v8/src/ic/mips64/ic-mips64.cc
@@ -668,7 +668,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ daddu(address, address, at);
__ lw(scratch_value, MemOperand(address));
__ Branch(&fast_double_without_map_check, ne, scratch_value,
- Operand(kHoleNanUpper32));
+ Operand(static_cast<int32_t>(kHoleNanUpper32)));
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
slow);
@@ -676,7 +676,7 @@ static void KeyedStoreGenerateMegamorphicHelper(
__ StoreNumberToDoubleElements(value, key,
elements, // Overwritten.
a3, // Scratch regs...
- a4, a5, &transition_double_elements);
+ a4, &transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
__ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
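The static_cast<int32_t>(kHoleNanUpper32) above matters because lw on MIPS64 sign-extends the loaded 32-bit word into the 64-bit register, while the unsigned constant would be zero-extended in the Operand, so the equality branch could never take. A host-side demonstration (the exact bit pattern is an assumption; only the set sign bit matters):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kHoleNanUpper32 = 0xFFF7FFFF;  // assumed pattern
      int64_t reg = (int32_t)kHoleNanUpper32;       // lw sign-extends
      // Unsigned operand is zero-extended: never equal once bit 31 is set.
      std::printf("%d\n", reg == kHoleNanUpper32);              // 0
      // The cast sign-extends the operand the same way the load does.
      std::printf("%d\n", reg == (int32_t)kHoleNanUpper32);     // 1
    }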
diff --git a/deps/v8/src/ic/ppc/OWNERS b/deps/v8/src/ic/ppc/OWNERS
new file mode 100644
index 0000000000..beecb3d0b1
--- /dev/null
+++ b/deps/v8/src/ic/ppc/OWNERS
@@ -0,0 +1,3 @@
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 4283d39f22..d7a70d7446 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -17,22 +17,21 @@ namespace internal {
void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Register holder, int accessor_index, int expected_arguments,
- Register scratch) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- r3 : receiver
// -- r5 : name
// -- lr : return address
// -----------------------------------
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
if (accessor_index >= 0) {
DCHECK(!holder.is(scratch));
DCHECK(!receiver.is(scratch));
// Call the JavaScript getter with the receiver on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
@@ -57,14 +56,13 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
- MacroAssembler* masm, Handle<HeapType> type, Register receiver,
- Register holder, int accessor_index, int expected_arguments,
- Register scratch) {
+ MacroAssembler* masm, Handle<Map> map, Register receiver, Register holder,
+ int accessor_index, int expected_arguments, Register scratch) {
// ----------- S t a t e -------------
// -- lr : return address
// -----------------------------------
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Save value register, so we can restore it later.
__ push(value());
@@ -74,7 +72,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
DCHECK(!receiver.is(scratch));
DCHECK(!value().is(scratch));
// Call the JavaScript setter with receiver and value on the stack.
- if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+ if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
__ LoadP(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
@@ -197,11 +195,11 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
void PropertyHandlerCompiler::GenerateCheckPropertyCell(
MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
Register scratch, Label* miss) {
- Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+ Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
DCHECK(cell->value()->IsTheHole());
Handle<WeakCell> weak_cell = masm->isolate()->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
- __ LoadP(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+ __ LoadP(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ bne(miss);
@@ -414,7 +412,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register object_reg, Register holder_reg, Register scratch1,
Register scratch2, Handle<Name> name, Label* miss,
PrototypeCheckType check) {
- Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+ Handle<Map> receiver_map = map();
// Make sure there's no overlap between holder and object registers.
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
@@ -426,9 +424,19 @@ Register PropertyHandlerCompiler::CheckPrototypes(
int depth = 0;
Handle<JSObject> current = Handle<JSObject>::null();
- if (type()->IsConstant()) {
- current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+ if (receiver_map->IsJSGlobalObjectMap()) {
+ current = isolate()->global_object();
}
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -463,23 +471,14 @@ Register PropertyHandlerCompiler::CheckPrototypes(
} else {
Register map_reg = scratch1;
__ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
- if (depth != 1 || check == CHECK_ALL_MAPS) {
- Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
- __ CmpWeakValue(map_reg, cell, scratch2);
- __ bne(miss);
- }
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
+ if (current_map->IsJSGlobalObjectMap()) {
GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
name, scratch2, miss);
+ } else if (depth != 1 || check == CHECK_ALL_MAPS) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+ __ CmpWeakValue(map_reg, cell, scratch2);
+ __ bne(miss);
}
reg = holder_reg; // From now on the object will be in holder_reg.
@@ -503,13 +502,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ bne(miss);
}
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
// Return the register containing the holder.
return reg;
}
@@ -619,7 +611,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
{
- FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
+ FrameScope frame_scope(masm(), StackFrame::INTERNAL);
if (must_preserve_receiver_reg) {
__ Push(receiver(), holder_reg, this->name());
} else {
@@ -671,11 +663,20 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
- Handle<JSObject> object, Handle<Name> name, int accessor_index) {
+ Handle<JSObject> object, Handle<Name> name,
+ Handle<ExecutableAccessorInfo> callback) {
Register holder_reg = Frontend(name);
__ Push(receiver(), holder_reg); // receiver
- __ LoadSmiLiteral(ip, Smi::FromInt(accessor_index));
+
+ // If the callback cannot leak, then push the callback directly,
+ // otherwise wrap it in a weak cell.
+ if (callback->data()->IsUndefined() || callback->data()->IsSmi()) {
+ __ mov(ip, Operand(callback));
+ } else {
+ Handle<WeakCell> cell = isolate()->factory()->NewWeakCell(callback);
+ __ mov(ip, Operand(cell));
+ }
__ push(ip);
__ mov(ip, Operand(name));
__ Push(ip, value());
@@ -721,7 +722,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
Register result = StoreDescriptor::ValueRegister();
Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
__ LoadWeakValue(result, weak_cell, &miss);
- __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+ __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (is_configurable) {
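CompileStoreCallback now decides how to embed the callback: if its data field is undefined or a Smi it cannot keep other heap objects alive, so the stub may reference it directly; anything else is wrapped in a freshly allocated weak cell so the compiled code does not leak the payload. The decision, sketched with hypothetical types:

    #include <variant>

    struct Callback { bool data_is_primitive; };  // undefined or Smi payload?
    struct WeakRef { const Callback* target; };   // stands in for a WeakCell

    using EmbeddedRef = std::variant<const Callback*, WeakRef>;

    EmbeddedRef EmbedCallback(const Callback& cb) {
      if (cb.data_is_primitive) return &cb;  // direct embed: cannot leak
      return WeakRef{&cb};                   // otherwise go via a weak cell
    }

    int main() {
      Callback cb{true};
      return EmbedCallback(cb).index();  // 0: embedded directly
    }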
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
index 820ab929ca..9f33a59e7b 100644
--- a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
@@ -30,7 +30,7 @@ void PropertyICCompiler::GenerateRuntimeSetProperty(
#define __ ACCESS_MASM(masm())
-Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+Handle<Code> PropertyICCompiler::CompilePolymorphic(MapHandleList* maps,
CodeHandleList* handlers,
Handle<Name> name,
Code::StubType type,
@@ -57,7 +57,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
}
Label number_case;
- Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+ Label* smi_target = IncludesNumberMap(maps) ? &number_case : &miss;
__ JumpIfSmi(receiver(), smi_target);
// Polymorphic keyed stores may use the map register
@@ -65,21 +65,23 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
DCHECK(kind() != Code::KEYED_STORE_IC ||
map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
- int receiver_count = types->length();
+ int receiver_count = maps->length();
int number_of_handled_maps = 0;
__ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
for (int current = 0; current < receiver_count; ++current) {
- Handle<HeapType> type = types->at(current);
- Handle<Map> map = IC::TypeToMap(*type, isolate());
+ Handle<Map> map = maps->at(current);
if (!map->is_deprecated()) {
number_of_handled_maps++;
Handle<WeakCell> cell = Map::WeakCellForMap(map);
__ CmpWeakValue(map_reg, cell, scratch2());
- if (type->Is(HeapType::Number())) {
+ Label next;
+ __ bne(&next);
+ if (map->instance_type() == HEAP_NUMBER_TYPE) {
DCHECK(!number_case.is_unused());
__ bind(&number_case);
}
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+ __ bind(&next);
}
}
DCHECK(number_of_handled_maps != 0);
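CompilePolymorphic now iterates maps rather than HeapTypes, and on PPC the conditional Jump is replaced with an explicit bne around each handler jump via a local next label. The generated code amounts to a linear dispatch over (map, handler) pairs; a host-side sketch:

    #include <utility>
    #include <vector>

    struct Map {};
    using Handler = int;  // stands in for a code object

    // Compare the receiver map against each cached map; first hit wins,
    // falling off the end is the miss path.
    const Handler* Dispatch(const Map* receiver_map,
                            const std::vector<std::pair<const Map*, Handler>>& cases) {
      for (const auto& c : cases) {
        if (c.first != receiver_map) continue;  // __ bne(&next)
        return &c.second;                       // __ Jump(handler)
      }
      return nullptr;                           // miss
    }

    int main() {
      Map m;
      std::vector<std::pair<const Map*, Handler>> cases{{&m, 42}};
      return Dispatch(&m, cases) ? 0 : 1;
    }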
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index 35a4acf8cc..e3a4938d6a 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -118,7 +118,7 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
int offset = PrimaryOffset(*name, flags, map);
if (entry(primary_, offset) == &primary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ TypeFeedbackOracle::IsRelevantFeedback(map, *native_context)) {
types->AddMapIfMissing(Handle<Map>(map), zone);
}
}
@@ -137,7 +137,7 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
// Lookup in secondary table and add matches.
int offset = SecondaryOffset(*name, flags, primary_offset);
if (entry(secondary_, offset) == &secondary_[i] &&
- !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+ TypeFeedbackOracle::IsRelevantFeedback(map, *native_context)) {
types->AddMapIfMissing(Handle<Map>(map), zone);
}
}
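CollectMatchingMaps recomputes each map's table offset and harvests it only if the entry still occupies that exact slot and, after the rename, TypeFeedbackOracle::IsRelevantFeedback accepts the map for the native context. The underlying structure is a two-level hash cache; a simplified probe with made-up hash mixes (the real offsets also fold in code flags):

    #include <cstdint>

    struct Entry { uint32_t name; uint32_t map; };

    static const int kSize = 64;
    static Entry primary[kSize], secondary[kSize];

    static int PrimaryOffset(uint32_t name, uint32_t map) {
      return (name ^ map) % kSize;                    // hypothetical mix
    }
    static int SecondaryOffset(uint32_t name, int primary_offset) {
      return int((name + primary_offset) % kSize);    // hypothetical mix
    }

    const Entry* Probe(uint32_t name, uint32_t map) {
      Entry* p = &primary[PrimaryOffset(name, map)];
      if (p->name == name && p->map == map) return p;
      Entry* s = &secondary[SecondaryOffset(name, PrimaryOffset(name, map))];
      if (s->name == name && s->map == map) return s;
      return nullptr;  // full miss: fall back to the runtime
    }

    int main() { return Probe(1, 2) == nullptr ? 0 : 1; }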
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index 485d87dc57..b6add9d2bc 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -209,7 +209,8 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
Factory* factory = masm->isolate()->factory();
Handle<WeakCell> weak_cell = factory->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
- __ Cmp(FieldOperand(scratch, Cell::kValueOffset), factory->the_hole_value());
+ __ Cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+ factory->the_hole_value());
__ j(not_equal, miss);
}
@@ -431,6 +432,17 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ }
+
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -466,24 +478,15 @@ Register PropertyHandlerCompiler::CheckPrototypes(
Register map_reg = scratch1;
__ movp(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
- if (depth != 1 || check == CHECK_ALL_MAPS) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (depth != 1 || check == CHECK_ALL_MAPS) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ j(not_equal, miss);
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- }
reg = holder_reg; // From now on the object will be in holder_reg.
__ movp(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
@@ -494,6 +497,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current_map = handle(current->map());
}
+ DCHECK(!current_map->IsJSGlobalProxyMap());
+
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -504,13 +509,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
- }
-
// Return the register containing the holder.
return reg;
}
diff --git a/deps/v8/src/ic/x87/handler-compiler-x87.cc b/deps/v8/src/ic/x87/handler-compiler-x87.cc
index 2eb10c3a3b..ce902757f1 100644
--- a/deps/v8/src/ic/x87/handler-compiler-x87.cc
+++ b/deps/v8/src/ic/x87/handler-compiler-x87.cc
@@ -431,6 +431,17 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (receiver_map->IsJSGlobalObjectMap()) {
current = isolate()->global_object();
}
+
+ // Check access rights to the global object. This has to happen after
+ // the map check so that we know that the object is actually a global
+ // object.
+ // This allows us to install generated handlers for accesses to the
+ // global proxy (as opposed to using slow ICs). See corresponding code
+ // in LookupForRead().
+ if (receiver_map->IsJSGlobalProxyMap()) {
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+ }
+
Handle<JSObject> prototype = Handle<JSObject>::null();
Handle<Map> current_map = receiver_map;
Handle<Map> holder_map(holder()->map());
@@ -465,26 +476,15 @@ Register PropertyHandlerCompiler::CheckPrototypes(
} else {
Register map_reg = scratch1;
__ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
- if (depth != 1 || check == CHECK_ALL_MAPS) {
+ if (current_map->IsJSGlobalObjectMap()) {
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+ name, scratch2, miss);
+ } else if (depth != 1 || check == CHECK_ALL_MAPS) {
Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
__ CmpWeakValue(map_reg, cell, scratch2);
__ j(not_equal, miss);
}
- // Check access rights to the global object. This has to happen after
- // the map check so that we know that the object is actually a global
- // object.
- // This allows us to install generated handlers for accesses to the
- // global proxy (as opposed to using slow ICs). See corresponding code
- // in LookupForRead().
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, map_reg, scratch2, miss);
- // Restore map_reg.
- __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
- } else if (current_map->IsJSGlobalObjectMap()) {
- GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
- name, scratch2, miss);
- }
reg = holder_reg; // From now on the object will be in holder_reg.
__ mov(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
}
@@ -494,6 +494,8 @@ Register PropertyHandlerCompiler::CheckPrototypes(
current_map = handle(current->map());
}
+ DCHECK(!current_map->IsJSGlobalProxyMap());
+
// Log the check depth.
LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
@@ -505,13 +507,6 @@ Register PropertyHandlerCompiler::CheckPrototypes(
__ j(not_equal, miss);
}
- // Perform security check for access to the global object.
- DCHECK(current_map->IsJSGlobalProxyMap() ||
- !current_map->is_access_check_needed());
- if (current_map->IsJSGlobalProxyMap()) {
- __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
- }
-
// Return the register containing the holder.
return reg;
}
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index 51fb9204f2..a100123e10 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -40,6 +40,7 @@ class PlatformInterfaceDescriptor;
V(ArrayConstructor) \
V(InternalArrayConstructorConstantArgCount) \
V(InternalArrayConstructor) \
+ V(Compare) \
V(CompareNil) \
V(ToBoolean) \
V(BinaryOp) \
@@ -406,6 +407,12 @@ class InternalArrayConstructorDescriptor : public CallInterfaceDescriptor {
};
+class CompareDescriptor : public CallInterfaceDescriptor {
+ public:
+ DECLARE_DESCRIPTOR(CompareDescriptor, CallInterfaceDescriptor)
+};
+
+
class CompareNilDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(CompareNilDescriptor, CallInterfaceDescriptor)
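The new Compare entry rides the same X-macro list as its neighbors: each V(Name) line expands once per consumer macro, so one added line yields the enum value, the descriptor class, and any related boilerplate together. A compilable miniature of the pattern (list contents trimmed to three entries):

    #include <cstdio>

    #define DESCRIPTOR_LIST(V) V(Compare) V(CompareNil) V(ToBoolean)

    // Consumer 1: an enum of kinds.
    enum DescriptorKind {
    #define DEF_KIND(name) k##name,
      DESCRIPTOR_LIST(DEF_KIND)
    #undef DEF_KIND
      kDescriptorCount
    };

    // Consumer 2: one struct per entry (the role DECLARE_DESCRIPTOR plays).
    #define DEF_CLASS(name) \
      struct name##Descriptor { static const int kind = k##name; };
    DESCRIPTOR_LIST(DEF_CLASS)
    #undef DEF_CLASS

    int main() {
      std::printf("%d kinds; Compare is #%d\n", (int)kDescriptorCount,
                  (int)CompareDescriptor::kind);
    }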
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 2c8367d7a1..e9713dbec0 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -34,8 +34,8 @@
#include "src/runtime-profiler.h"
#include "src/sampler.h"
#include "src/scopeinfo.h"
-#include "src/serialize.h"
#include "src/simulator.h"
+#include "src/snapshot/serialize.h"
#include "src/version.h"
#include "src/vm-state-inl.h"
@@ -82,16 +82,13 @@ void ThreadLocalTop::InitializeInternal() {
external_caught_exception_ = false;
failed_access_check_callback_ = NULL;
save_context_ = NULL;
- catcher_ = NULL;
promise_on_stack_ = NULL;
// These members are re-initialized later after deserialization
// is complete.
pending_exception_ = NULL;
- has_pending_message_ = false;
rethrowing_message_ = false;
pending_message_obj_ = NULL;
- pending_message_script_ = NULL;
scheduled_exception_ = NULL;
}
@@ -190,7 +187,6 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
v->VisitPointer(&thread->pending_exception_);
v->VisitPointer(&(thread->pending_message_obj_));
- v->VisitPointer(bit_cast<Object**>(&(thread->pending_message_script_)));
v->VisitPointer(bit_cast<Object**>(&(thread->context_)));
v->VisitPointer(&thread->scheduled_exception_);
@@ -199,7 +195,6 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
block = block->next_) {
v->VisitPointer(bit_cast<Object**>(&(block->exception_)));
v->VisitPointer(bit_cast<Object**>(&(block->message_obj_)));
- v->VisitPointer(bit_cast<Object**>(&(block->message_script_)));
}
// Iterate over pointers on native execution stack.
@@ -255,7 +250,6 @@ void Isolate::RegisterTryCatchHandler(v8::TryCatch* that) {
void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
DCHECK(thread_local_top()->try_catch_handler() == that);
thread_local_top()->set_try_catch_handler(that->next_);
- thread_local_top()->catcher_ = NULL;
}
@@ -550,7 +544,7 @@ class CaptureStackTraceHelper {
}
if (!function_key_.is_null()) {
- Handle<Object> fun_name(fun->shared()->DebugName(), isolate_);
+ Handle<Object> fun_name = JSFunction::GetDebugName(fun);
JSObject::AddProperty(stack_frame, function_key_, fun_name, NONE);
}
@@ -611,7 +605,7 @@ Handle<JSArray> Isolate::GetDetailedFromSimpleStackTrace(
Address pc = code->address() + offset->value();
bool is_constructor =
recv->IsJSObject() &&
- Handle<JSObject>::cast(recv)->map()->constructor() == *fun;
+ Handle<JSObject>::cast(recv)->map()->GetConstructor() == *fun;
Handle<JSObject> stack_frame =
helper.NewStackFrameObject(fun, code, pc, is_constructor);
@@ -724,7 +718,9 @@ void Isolate::SetFailedAccessCheckCallback(
static inline AccessCheckInfo* GetAccessCheckInfo(Isolate* isolate,
Handle<JSObject> receiver) {
- JSFunction* constructor = JSFunction::cast(receiver->map()->constructor());
+ Object* maybe_constructor = receiver->map()->GetConstructor();
+ if (!maybe_constructor->IsJSFunction()) return NULL;
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
if (!constructor->shared()->IsApiFunction()) return NULL;
Object* data_obj =
@@ -735,15 +731,16 @@ static inline AccessCheckInfo* GetAccessCheckInfo(Isolate* isolate,
}
-void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver,
- v8::AccessType type) {
+static void ThrowAccessCheckError(Isolate* isolate) {
+ Handle<String> message =
+ isolate->factory()->InternalizeUtf8String("no access");
+ isolate->ScheduleThrow(*isolate->factory()->NewTypeError(message));
+}
+
+
+void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver) {
if (!thread_local_top()->failed_access_check_callback_) {
- Handle<String> message = factory()->InternalizeUtf8String("no access");
- Handle<Object> error;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- this, error, factory()->NewTypeError(message), /* void */);
- ScheduleThrow(*error);
- return;
+ return ThrowAccessCheckError(this);
}
DCHECK(receiver->IsAccessCheckNeeded());
@@ -754,47 +751,17 @@ void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver,
Handle<Object> data;
{ DisallowHeapAllocation no_gc;
AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
- if (!access_check_info) return;
+ if (!access_check_info) {
+ AllowHeapAllocation doesnt_matter_anymore;
+ return ThrowAccessCheckError(this);
+ }
data = handle(access_check_info->data(), this);
}
// Leaving JavaScript.
VMState<EXTERNAL> state(this);
thread_local_top()->failed_access_check_callback_(
- v8::Utils::ToLocal(receiver),
- type,
- v8::Utils::ToLocal(data));
-}
-
-
-enum MayAccessDecision {
- YES, NO, UNKNOWN
-};
-
-
-static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
- Handle<JSObject> receiver,
- v8::AccessType type) {
- DisallowHeapAllocation no_gc;
- // During bootstrapping, callback functions are not enabled yet.
- if (isolate->bootstrapper()->IsActive()) return YES;
-
- if (receiver->IsJSGlobalProxy()) {
- Object* receiver_context = JSGlobalProxy::cast(*receiver)->native_context();
- if (!receiver_context->IsContext()) return NO;
-
- // Get the native context of current top context.
- // avoid using Isolate::native_context() because it uses Handle.
- Context* native_context =
- isolate->context()->global_object()->native_context();
- if (receiver_context == native_context) return YES;
-
- if (Context::cast(receiver_context)->security_token() ==
- native_context->security_token())
- return YES;
- }
-
- return UNKNOWN;
+ v8::Utils::ToLocal(receiver), v8::ACCESS_HAS, v8::Utils::ToLocal(data));
}
@@ -810,21 +777,33 @@ bool Isolate::IsInternallyUsedPropertyName(Object* name) {
}
-bool Isolate::MayNamedAccess(Handle<JSObject> receiver,
- Handle<Object> key,
- v8::AccessType type) {
+bool Isolate::MayAccess(Handle<JSObject> receiver) {
DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
- // Skip checks for internally used properties. Note, we do not
- // require existence of a context in this case.
- if (IsInternallyUsedPropertyName(key)) return true;
-
// Check for compatibility between the security tokens in the
// current lexical context and the accessed object.
DCHECK(context());
- MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
- if (decision != UNKNOWN) return decision == YES;
+ {
+ DisallowHeapAllocation no_gc;
+ // During bootstrapping, callback functions are not enabled yet.
+ if (bootstrapper()->IsActive()) return true;
+
+ if (receiver->IsJSGlobalProxy()) {
+ Object* receiver_context =
+ JSGlobalProxy::cast(*receiver)->native_context();
+ if (!receiver_context->IsContext()) return false;
+
+      // Get the native context of the current top context.
+      // Avoid using Isolate::native_context() because it uses a Handle.
+ Context* native_context = context()->global_object()->native_context();
+ if (receiver_context == native_context) return true;
+
+ if (Context::cast(receiver_context)->security_token() ==
+ native_context->security_token())
+ return true;
+ }
+ }
HandleScope scope(this);
Handle<Object> data;
@@ -838,47 +817,13 @@ bool Isolate::MayNamedAccess(Handle<JSObject> receiver,
data = handle(access_check_info->data(), this);
}
- LOG(this, ApiNamedSecurityCheck(*key));
+ LOG(this, ApiSecurityCheck());
// Leaving JavaScript.
VMState<EXTERNAL> state(this);
- return callback(v8::Utils::ToLocal(receiver),
- v8::Utils::ToLocal(key),
- type,
- v8::Utils::ToLocal(data));
-}
-
-
-bool Isolate::MayIndexedAccess(Handle<JSObject> receiver,
- uint32_t index,
- v8::AccessType type) {
- DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
- // Check for compatibility between the security tokens in the
- // current lexical context and the accessed object.
- DCHECK(context());
-
- MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
- if (decision != UNKNOWN) return decision == YES;
-
- HandleScope scope(this);
- Handle<Object> data;
- v8::IndexedSecurityCallback callback;
- { DisallowHeapAllocation no_gc;
- // Get named access check callback
- AccessCheckInfo* access_check_info = GetAccessCheckInfo(this, receiver);
- if (!access_check_info) return false;
- Object* fun_obj = access_check_info->indexed_callback();
- callback = v8::ToCData<v8::IndexedSecurityCallback>(fun_obj);
- if (!callback) return false;
- data = handle(access_check_info->data(), this);
- }
-
- LOG(this, ApiIndexedSecurityCheck(index));
-
- // Leaving JavaScript.
- VMState<EXTERNAL> state(this);
- return callback(
- v8::Utils::ToLocal(receiver), index, type, v8::Utils::ToLocal(data));
+ Handle<Object> key = factory()->undefined_value();
+ return callback(v8::Utils::ToLocal(receiver), v8::Utils::ToLocal(key),
+ v8::ACCESS_HAS, v8::Utils::ToLocal(data));
}
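MayNamedAccess and MayIndexedAccess collapse into a single MayAccess, inlining the old MayAccessPreCheck: access is granted outright when the receiver's native context is the accessing context or shares its security token; only on an unknown outcome does control fall through to the embedder callback. The token comparison, sketched with hypothetical types:

    struct Context { const void* security_token; };

    // Fast-path decision only; returning false here stands for "ask the
    // embedder callback", which this sketch omits.
    bool MayAccessFastPath(const Context& accessing, const Context& receiver) {
      if (&receiver == &accessing) return true;  // same native context
      return receiver.security_token == accessing.security_token;
    }

    int main() {
      int token = 0;
      Context a{&token}, b{&token};
      return MayAccessFastPath(a, b) ? 0 : 1;    // 0: access granted
    }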
@@ -895,7 +840,7 @@ Object* Isolate::StackOverflow() {
Handle<JSObject> boilerplate = Handle<JSObject>::cast(
Object::GetProperty(js_builtins_object(), key).ToHandleChecked());
Handle<JSObject> exception = factory()->CopyJSObject(boilerplate);
- DoThrow(*exception, NULL);
+ Throw(*exception, nullptr);
CaptureAndSetSimpleStackTrace(exception, factory()->undefined_value());
return heap()->exception();
@@ -903,8 +848,7 @@ Object* Isolate::StackOverflow() {
Object* Isolate::TerminateExecution() {
- DoThrow(heap_.termination_exception(), NULL);
- return heap()->exception();
+ return Throw(heap_.termination_exception(), nullptr);
}
@@ -949,19 +893,119 @@ void Isolate::InvokeApiInterruptCallbacks() {
}
+void ReportBootstrappingException(Handle<Object> exception,
+ MessageLocation* location) {
+ base::OS::PrintError("Exception thrown during bootstrapping\n");
+ if (location == NULL || location->script().is_null()) return;
+ // We are bootstrapping and caught an error where the location is set
+ // and we have a script for the location.
+ // In this case we could have an extension (or an internal error
+  // somewhere) and we print out the line number at which the error occurred
+ // to the console for easier debugging.
+ int line_number =
+ location->script()->GetLineNumber(location->start_pos()) + 1;
+ if (exception->IsString() && location->script()->name()->IsString()) {
+ base::OS::PrintError(
+ "Extension or internal compilation error: %s in %s at line %d.\n",
+ String::cast(*exception)->ToCString().get(),
+ String::cast(location->script()->name())->ToCString().get(),
+ line_number);
+ } else if (location->script()->name()->IsString()) {
+ base::OS::PrintError(
+ "Extension or internal compilation error in %s at line %d.\n",
+ String::cast(location->script()->name())->ToCString().get(),
+ line_number);
+ } else {
+ base::OS::PrintError("Extension or internal compilation error.\n");
+ }
+#ifdef OBJECT_PRINT
+ // Since comments and empty lines have been stripped from the source of
+ // builtins, print the actual source here so that line numbers match.
+ if (location->script()->source()->IsString()) {
+ Handle<String> src(String::cast(location->script()->source()));
+ PrintF("Failing script:\n");
+ int len = src->length();
+ int line_number = 1;
+ PrintF("%5d: ", line_number);
+ for (int i = 0; i < len; i++) {
+ uint16_t character = src->Get(i);
+ PrintF("%c", character);
+ if (character == '\n' && i < len - 2) {
+ PrintF("%5d: ", ++line_number);
+ }
+ }
+ }
+#endif
+}
+
+
Object* Isolate::Throw(Object* exception, MessageLocation* location) {
- DoThrow(exception, location);
+ DCHECK(!has_pending_exception());
+
+ HandleScope scope(this);
+ Handle<Object> exception_handle(exception, this);
+
+ // Determine whether a message needs to be created for the given exception
+ // depending on the following criteria:
+ // 1) External v8::TryCatch missing: Always create a message because any
+ // JavaScript handler for a finally-block might re-throw to top-level.
+ // 2) External v8::TryCatch exists: Only create a message if the handler
+ // captures messages or is verbose (which reports despite the catch).
+ // 3) ReThrow from v8::TryCatch: The message from a previous throw still
+ // exists and we preserve it instead of creating a new message.
+ bool requires_message = try_catch_handler() == nullptr ||
+ try_catch_handler()->is_verbose_ ||
+ try_catch_handler()->capture_message_;
+ bool rethrowing_message = thread_local_top()->rethrowing_message_;
+
+ thread_local_top()->rethrowing_message_ = false;
+
+ // Notify debugger of exception.
+ if (is_catchable_by_javascript(exception)) {
+ debug()->OnThrow(exception_handle);
+ }
+
+ // Generate the message if required.
+ if (requires_message && !rethrowing_message) {
+ MessageLocation potential_computed_location;
+ if (location == NULL) {
+ // If no location was specified we use a computed one instead.
+ ComputeLocation(&potential_computed_location);
+ location = &potential_computed_location;
+ }
+
+ if (bootstrapper()->IsActive()) {
+ // It's not safe to try to make message objects or collect stack traces
+ // while the bootstrapper is active since the infrastructure may not have
+ // been properly initialized.
+ ReportBootstrappingException(exception_handle, location);
+ } else {
+ Handle<Object> message_obj = CreateMessage(exception_handle, location);
+ thread_local_top()->pending_message_obj_ = *message_obj;
+
+ // If the abort-on-uncaught-exception flag is specified, abort on any
+ // exception not caught by JavaScript, even when an external handler is
+ // present. This flag is intended for use by JavaScript developers, so
+ // print a user-friendly stack trace (not an internal one).
+ if (FLAG_abort_on_uncaught_exception &&
+ PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT) {
+ FLAG_abort_on_uncaught_exception = false; // Prevent endless recursion.
+ PrintF(stderr, "%s\n\nFROM\n",
+ MessageHandler::GetLocalizedMessage(this, message_obj).get());
+ PrintCurrentStackTrace(stderr);
+ base::OS::Abort();
+ }
+ }
+ }
+
+ // Set the exception being thrown.
+ set_pending_exception(*exception_handle);
return heap()->exception();
}
Object* Isolate::ReThrow(Object* exception) {
- bool can_be_caught_externally = false;
- bool catchable_by_javascript = is_catchable_by_javascript(exception);
- ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
-
- thread_local_top()->catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
+ DCHECK(!has_pending_exception());
// Set the exception being re-thrown.
set_pending_exception(exception);
@@ -969,9 +1013,138 @@ Object* Isolate::ReThrow(Object* exception) {
}
+Object* Isolate::FindHandler() {
+ Object* exception = pending_exception();
+
+ Code* code = nullptr;
+ Context* context = nullptr;
+ intptr_t offset = 0;
+ Address handler_sp = nullptr;
+ Address handler_fp = nullptr;
+
+  // Special handling of termination exceptions: uncatchable by JavaScript
+  // code, so we unwind the handlers until the top ENTRY handler is found.
+ bool catchable_by_js = is_catchable_by_javascript(exception);
+
+ // Compute handler and stack unwinding information by performing a full walk
+ // over the stack and dispatching according to the frame type.
+ for (StackFrameIterator iter(this); !iter.done(); iter.Advance()) {
+ StackFrame* frame = iter.frame();
+
+ // For JSEntryStub frames we always have a handler.
+ if (frame->is_entry() || frame->is_entry_construct()) {
+ StackHandler* handler = frame->top_handler();
+
+ // Restore the next handler.
+ thread_local_top()->handler_ = handler->next()->address();
+
+ // Gather information from the handler.
+ code = frame->LookupCode();
+ handler_sp = handler->address() + StackHandlerConstants::kSize;
+ offset = Smi::cast(code->handler_table()->get(0))->value();
+ break;
+ }
+
+ // For optimized frames we perform a lookup in the handler table.
+ if (frame->is_optimized() && catchable_by_js) {
+ OptimizedFrame* js_frame = static_cast<OptimizedFrame*>(frame);
+ int stack_slots = 0; // Will contain stack slot count of frame.
+ offset = js_frame->LookupExceptionHandlerInTable(&stack_slots);
+ if (offset < 0) continue;
+
+ // Compute the stack pointer from the frame pointer. This ensures that
+ // argument slots on the stack are dropped as returning would.
+ Address return_sp = frame->fp() -
+ StandardFrameConstants::kFixedFrameSizeFromFp -
+ stack_slots * kPointerSize;
+
+ // Gather information from the frame.
+ code = frame->LookupCode();
+ handler_sp = return_sp;
+ handler_fp = frame->fp();
+ break;
+ }
+
+ // For JavaScript frames we perform a range lookup in the handler table.
+ if (frame->is_java_script() && catchable_by_js) {
+ JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
+ int stack_slots = 0; // Will contain operand stack depth of handler.
+ offset = js_frame->LookupExceptionHandlerInTable(&stack_slots);
+ if (offset < 0) continue;
+
+ // Compute the stack pointer from the frame pointer. This ensures that
+      // operand stack slots are dropped for nested statements. Also restore
+      // the correct context for the handler, pushed within the try-block.
+ Address return_sp = frame->fp() -
+ StandardFrameConstants::kFixedFrameSizeFromFp -
+ stack_slots * kPointerSize;
+ STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
+ context = Context::cast(Memory::Object_at(return_sp - kPointerSize));
+
+ // Gather information from the frame.
+ code = frame->LookupCode();
+ handler_sp = return_sp;
+ handler_fp = frame->fp();
+ break;
+ }
+ }
+
+ // Handler must exist.
+ CHECK(code != nullptr);
+
+ // Store information to be consumed by the CEntryStub.
+ thread_local_top()->pending_handler_context_ = context;
+ thread_local_top()->pending_handler_code_ = code;
+ thread_local_top()->pending_handler_offset_ = offset;
+ thread_local_top()->pending_handler_fp_ = handler_fp;
+ thread_local_top()->pending_handler_sp_ = handler_sp;
+
+ // Return and clear pending exception.
+ clear_pending_exception();
+ return exception;
+}
+
+
+Isolate::CatchType Isolate::PredictExceptionCatcher() {
+ Address external_handler = thread_local_top()->try_catch_handler_address();
+ Address entry_handler = Isolate::handler(thread_local_top());
+ if (IsExternalHandlerOnTop(nullptr)) return CAUGHT_BY_EXTERNAL;
+
+ // Search for an exception handler by performing a full walk over the stack.
+ for (StackFrameIterator iter(this); !iter.done(); iter.Advance()) {
+ StackFrame* frame = iter.frame();
+
+ // For JSEntryStub frames we update the JS_ENTRY handler.
+ if (frame->is_entry() || frame->is_entry_construct()) {
+ entry_handler = frame->top_handler()->next()->address();
+ }
+
+ // For JavaScript frames we perform a lookup in the handler table.
+ if (frame->is_java_script()) {
+ JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
+ int stack_slots = 0; // The computed stack slot count is not used.
+ if (js_frame->LookupExceptionHandlerInTable(&stack_slots) > 0) {
+ return CAUGHT_BY_JAVASCRIPT;
+ }
+ }
+
+ // The exception has been externally caught if and only if there is an
+ // external handler which is on top of the top-most JS_ENTRY handler.
+ if (external_handler != nullptr && !try_catch_handler()->is_verbose_) {
+ if (entry_handler == nullptr || entry_handler > external_handler) {
+ return CAUGHT_BY_EXTERNAL;
+ }
+ }
+ }
+
+ // Handler not found.
+ return NOT_CAUGHT;
+}
+
+
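+FindHandler and PredictExceptionCatcher share the same skeleton: walk the stack from the innermost frame outward, where entry (JS_ENTRY) frames always handle, and optimized or JavaScript frames handle only when their handler table covers the current position and the exception is catchable by JavaScript. A compact sketch over a fake frame list (hypothetical frame model):

    #include <vector>

    struct Frame {
      enum Kind { ENTRY, JS, OTHER } kind;
      int handler_offset;  // < 0 when the handler table has no match
    };

    // First frame that can handle the exception wins; the entry frame at
    // the bottom guarantees termination.
    const Frame* FindHandler(const std::vector<Frame>& frames,
                             bool catchable_by_js) {
      for (const Frame& f : frames) {
        if (f.kind == Frame::ENTRY) return &f;
        if (f.kind == Frame::JS && catchable_by_js && f.handler_offset >= 0)
          return &f;
      }
      return nullptr;  // unreachable when an entry frame is present
    }

    int main() {
      std::vector<Frame> stack{{Frame::JS, -1}, {Frame::JS, 7}, {Frame::ENTRY, 0}};
      return FindHandler(stack, true)->handler_offset;  // 7
    }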
Object* Isolate::ThrowIllegalOperation() {
if (FLAG_stack_trace_on_illegal) PrintStack(stdout);
- return Throw(heap_.illegal_access_string());
+ return Throw(heap()->illegal_access_string());
}
@@ -994,13 +1167,8 @@ void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
DCHECK(handler->rethrow_);
DCHECK(handler->capture_message_);
Object* message = reinterpret_cast<Object*>(handler->message_obj_);
- Object* script = reinterpret_cast<Object*>(handler->message_script_);
DCHECK(message->IsJSMessageObject() || message->IsTheHole());
- DCHECK(script->IsScript() || script->IsTheHole());
thread_local_top()->pending_message_obj_ = message;
- thread_local_top()->pending_message_script_ = script;
- thread_local_top()->pending_message_start_pos_ = handler->message_start_pos_;
- thread_local_top()->pending_message_end_pos_ = handler->message_end_pos_;
}
@@ -1127,37 +1295,6 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
}
-bool Isolate::ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript) {
- // Find the top-most try-catch handler.
- StackHandler* handler =
- StackHandler::FromAddress(Isolate::handler(thread_local_top()));
- while (handler != NULL && !handler->is_catch()) {
- handler = handler->next();
- }
-
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-catch
- // handler.
- *can_be_caught_externally = external_handler_address != NULL &&
- (handler == NULL || handler->address() > external_handler_address ||
- !catchable_by_javascript);
-
- if (*can_be_caught_externally) {
- // Only report the exception if the external handler is verbose.
- return try_catch_handler()->is_verbose_;
- } else {
- // Report the exception if it isn't caught by JavaScript code.
- return handler == NULL;
- }
-}
-
-
// Traverse prototype chain to find out whether the object is derived from
// the Error object.
bool Isolate::IsErrorObject(Handle<Object> obj) {
@@ -1172,7 +1309,7 @@ bool Isolate::IsErrorObject(Handle<Object> obj) {
for (PrototypeIterator iter(this, *obj, PrototypeIterator::START_AT_RECEIVER);
!iter.IsAtEnd(); iter.Advance()) {
if (iter.GetCurrent()->IsJSProxy()) return false;
- if (JSObject::cast(iter.GetCurrent())->map()->constructor() ==
+ if (JSObject::cast(iter.GetCurrent())->map()->GetConstructor() ==
*error_constructor) {
return true;
}
@@ -1180,8 +1317,6 @@ bool Isolate::IsErrorObject(Handle<Object> obj) {
return false;
}
-static int fatal_exception_depth = 0;
-
Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation* location) {
@@ -1231,190 +1366,96 @@ Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
}
-void ReportBootstrappingException(Handle<Object> exception,
- MessageLocation* location) {
- base::OS::PrintError("Exception thrown during bootstrapping\n");
- if (location == NULL || location->script().is_null()) return;
- // We are bootstrapping and caught an error where the location is set
- // and we have a script for the location.
- // In this case we could have an extension (or an internal error
- // somewhere) and we print out the line number at which the error occured
- // to the console for easier debugging.
- int line_number =
- location->script()->GetLineNumber(location->start_pos()) + 1;
- if (exception->IsString() && location->script()->name()->IsString()) {
- base::OS::PrintError(
- "Extension or internal compilation error: %s in %s at line %d.\n",
- String::cast(*exception)->ToCString().get(),
- String::cast(location->script()->name())->ToCString().get(),
- line_number);
- } else if (location->script()->name()->IsString()) {
- base::OS::PrintError(
- "Extension or internal compilation error in %s at line %d.\n",
- String::cast(location->script()->name())->ToCString().get(),
- line_number);
- } else {
- base::OS::PrintError("Extension or internal compilation error.\n");
- }
-#ifdef OBJECT_PRINT
- // Since comments and empty lines have been stripped from the source of
- // builtins, print the actual source here so that line numbers match.
- if (location->script()->source()->IsString()) {
- Handle<String> src(String::cast(location->script()->source()));
- PrintF("Failing script:\n");
- int len = src->length();
- int line_number = 1;
- PrintF("%5d: ", line_number);
- for (int i = 0; i < len; i++) {
- uint16_t character = src->Get(i);
- PrintF("%c", character);
- if (character == '\n' && i < len - 2) {
- PrintF("%5d: ", ++line_number);
- }
- }
- }
-#endif
-}
+bool Isolate::IsJavaScriptHandlerOnTop(Object* exception) {
+ DCHECK_NE(heap()->the_hole_value(), exception);
+ // For uncatchable exceptions, the JavaScript handler cannot be on top.
+ if (!is_catchable_by_javascript(exception)) return false;
-void Isolate::DoThrow(Object* exception, MessageLocation* location) {
- DCHECK(!has_pending_exception());
+  // Get the top-most JS_ENTRY handler; if there is none, it cannot be on top.
+ Address entry_handler = Isolate::handler(thread_local_top());
+ if (entry_handler == nullptr) return false;
- HandleScope scope(this);
- Handle<Object> exception_handle(exception, this);
-
- // Determine reporting and whether the exception is caught externally.
- bool catchable_by_javascript = is_catchable_by_javascript(exception);
- bool can_be_caught_externally = false;
- bool should_report_exception =
- ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
- bool report_exception = catchable_by_javascript && should_report_exception;
- bool try_catch_needs_message =
- can_be_caught_externally && try_catch_handler()->capture_message_;
- bool rethrowing_message = thread_local_top()->rethrowing_message_;
-
- thread_local_top()->rethrowing_message_ = false;
-
- // Notify debugger of exception.
- if (catchable_by_javascript) {
- debug()->OnThrow(exception_handle, report_exception);
- }
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler = thread_local_top()->try_catch_handler_address();
+ if (external_handler == nullptr) return true;
- // Generate the message if required.
- if (!rethrowing_message && (report_exception || try_catch_needs_message)) {
- MessageLocation potential_computed_location;
- if (location == NULL) {
- // If no location was specified we use a computed one instead.
- ComputeLocation(&potential_computed_location);
- location = &potential_computed_location;
- }
+ // The exception has been externally caught if and only if there is an
+ // external handler which is on top of the top-most JS_ENTRY handler.
+ //
+  // Note that finally clauses would re-throw an exception unless it's aborted
+ // by jumps in control flow (like return, break, etc.) and we'll have another
+ // chance to set proper v8::TryCatch later.
+ return (entry_handler < external_handler);
+}
- if (bootstrapper()->IsActive()) {
- // It's not safe to try to make message objects or collect stack traces
- // while the bootstrapper is active since the infrastructure may not have
- // been properly initialized.
- ReportBootstrappingException(exception_handle, location);
- } else {
- Handle<Object> message_obj = CreateMessage(exception_handle, location);
- thread_local_top()->pending_message_obj_ = *message_obj;
- thread_local_top()->pending_message_script_ = *location->script();
- thread_local_top()->pending_message_start_pos_ = location->start_pos();
- thread_local_top()->pending_message_end_pos_ = location->end_pos();
+bool Isolate::IsExternalHandlerOnTop(Object* exception) {
+ DCHECK_NE(heap()->the_hole_value(), exception);
- // If the abort-on-uncaught-exception flag is specified, abort on any
- // exception not caught by JavaScript, even when an external handler is
- // present. This flag is intended for use by JavaScript developers, so
- // print a user-friendly stack trace (not an internal one).
- if (fatal_exception_depth == 0 && FLAG_abort_on_uncaught_exception &&
- (report_exception || can_be_caught_externally)) {
- fatal_exception_depth++;
- PrintF(stderr, "%s\n\nFROM\n",
- MessageHandler::GetLocalizedMessage(this, message_obj).get());
- PrintCurrentStackTrace(stderr);
- base::OS::Abort();
- }
- }
- }
+ // Get the address of the external handler so we can compare the address to
+ // determine which one is closer to the top of the stack.
+ Address external_handler = thread_local_top()->try_catch_handler_address();
+ if (external_handler == nullptr) return false;
- // Save the message for reporting if the the exception remains uncaught.
- thread_local_top()->has_pending_message_ = report_exception;
+ // For uncatchable exceptions, the external handler is always on top.
+ if (!is_catchable_by_javascript(exception)) return true;
- // Do not forget to clean catcher_ if currently thrown exception cannot
- // be caught. If necessary, ReThrow will update the catcher.
- thread_local_top()->catcher_ = can_be_caught_externally ?
- try_catch_handler() : NULL;
+  // Get the top-most JS_ENTRY handler; if there is none, it cannot be on top.
+ Address entry_handler = Isolate::handler(thread_local_top());
+ if (entry_handler == nullptr) return true;
- set_pending_exception(*exception_handle);
+ // The exception has been externally caught if and only if there is an
+ // external handler which is on top of the top-most JS_ENTRY handler.
+ //
+  // Note that finally clauses would re-throw an exception unless it's aborted
+ // by jumps in control flow (like return, break, etc.) and we'll have another
+ // chance to set proper v8::TryCatch later.
+ return (entry_handler > external_handler);
}
-bool Isolate::HasExternalTryCatch() {
- DCHECK(has_pending_exception());
+void Isolate::ReportPendingMessages() {
+ Object* exception = pending_exception();
- return (thread_local_top()->catcher_ != NULL) &&
- (try_catch_handler() == thread_local_top()->catcher_);
-}
+ // Try to propagate the exception to an external v8::TryCatch handler. If
+ // propagation was unsuccessful, then we will get another chance at reporting
+ // the pending message if the exception is re-thrown.
+ bool has_been_propagated = PropagatePendingExceptionToExternalTryCatch();
+ if (!has_been_propagated) return;
+ // Clear the pending message object early to avoid endless recursion.
+ Object* message_obj = thread_local_top_.pending_message_obj_;
+ clear_pending_message();
-bool Isolate::IsFinallyOnTop() {
- // Get the address of the external handler so we can compare the address to
- // determine which one is closer to the top of the stack.
- Address external_handler_address =
- thread_local_top()->try_catch_handler_address();
- DCHECK(external_handler_address != NULL);
-
- // The exception has been externally caught if and only if there is
- // an external handler which is on top of the top-most try-finally
- // handler.
- // There should be no try-catch blocks as they would prohibit us from
- // finding external catcher in the first place (see catcher_ check above).
- //
- // Note, that finally clause would rethrow an exception unless it's
- // aborted by jumps in control flow like return, break, etc. and we'll
- // have another chances to set proper v8::TryCatch.
- StackHandler* handler =
- StackHandler::FromAddress(Isolate::handler(thread_local_top()));
- while (handler != NULL && handler->address() < external_handler_address) {
- DCHECK(!handler->is_catch());
- if (handler->is_finally()) return true;
+ // For uncatchable exceptions we do nothing. If needed, the exception and the
+ // message have already been propagated to v8::TryCatch.
+ if (!is_catchable_by_javascript(exception)) return;
- handler = handler->next();
+ // Determine whether the message needs to be reported to all message handlers
+  // depending on whether an external v8::TryCatch or an internal JavaScript
+ // handler is on top.
+ bool should_report_exception;
+ if (IsExternalHandlerOnTop(exception)) {
+ // Only report the exception if the external handler is verbose.
+ should_report_exception = try_catch_handler()->is_verbose_;
+ } else {
+ // Report the exception if it isn't caught by JavaScript code.
+ should_report_exception = !IsJavaScriptHandlerOnTop(exception);
}
- return false;
-}
-
-
-void Isolate::ReportPendingMessages() {
- DCHECK(has_pending_exception());
- bool can_clear_message = PropagatePendingExceptionToExternalTryCatch();
-
- HandleScope scope(this);
- if (thread_local_top_.pending_exception_ == heap()->termination_exception()) {
- // Do nothing: if needed, the exception has been already propagated to
- // v8::TryCatch.
- } else {
- if (thread_local_top_.has_pending_message_) {
- thread_local_top_.has_pending_message_ = false;
- if (!thread_local_top_.pending_message_obj_->IsTheHole()) {
- HandleScope scope(this);
- Handle<Object> message_obj(thread_local_top_.pending_message_obj_,
- this);
- if (!thread_local_top_.pending_message_script_->IsTheHole()) {
- Handle<Script> script(
- Script::cast(thread_local_top_.pending_message_script_));
- int start_pos = thread_local_top_.pending_message_start_pos_;
- int end_pos = thread_local_top_.pending_message_end_pos_;
- MessageLocation location(script, start_pos, end_pos);
- MessageHandler::ReportMessage(this, &location, message_obj);
- } else {
- MessageHandler::ReportMessage(this, NULL, message_obj);
- }
- }
- }
+ // Actually report the pending message to all message handlers.
+ if (!message_obj->IsTheHole() && should_report_exception) {
+ HandleScope scope(this);
+ Handle<JSMessageObject> message(JSMessageObject::cast(message_obj));
+ Handle<JSValue> script_wrapper(JSValue::cast(message->script()));
+ Handle<Script> script(Script::cast(script_wrapper->value()));
+ int start_pos = message->start_position();
+ int end_pos = message->end_position();
+ MessageLocation location(script, start_pos, end_pos);
+ MessageHandler::ReportMessage(this, &location, message);
}
- if (can_clear_message) clear_pending_message();
}
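
Note on the rewritten reporting logic: whether the message reaches message
handlers now hinges on the external handler's verbosity. A minimal embedder
sketch, assuming the public v8::TryCatch API of this V8 line (SetVerbose() and
V8::AddMessageListener() as declared in include/v8.h):

    v8::TryCatch try_catch;
    try_catch.SetVerbose(true);
    // Run a script that throws here. With a verbose external handler on top,
    // ReportPendingMessages() still forwards the pending message to listeners
    // registered via v8::V8::AddMessageListener(); with the default
    // (non-verbose) handler the report is suppressed, matching the
    // should_report_exception computation above.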
@@ -1422,12 +1463,13 @@ MessageLocation Isolate::GetMessageLocation() {
DCHECK(has_pending_exception());
if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
- thread_local_top_.has_pending_message_ &&
!thread_local_top_.pending_message_obj_->IsTheHole()) {
- Handle<Script> script(
- Script::cast(thread_local_top_.pending_message_script_));
- int start_pos = thread_local_top_.pending_message_start_pos_;
- int end_pos = thread_local_top_.pending_message_end_pos_;
+ Handle<JSMessageObject> message_obj(
+ JSMessageObject::cast(thread_local_top_.pending_message_obj_));
+ Handle<JSValue> script_wrapper(JSValue::cast(message_obj->script()));
+ Handle<Script> script(Script::cast(script_wrapper->value()));
+ int start_pos = message_obj->start_position();
+ int end_pos = message_obj->end_position();
return MessageLocation(script, start_pos, end_pos);
}
@@ -1478,13 +1520,16 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
}
-void Isolate::PushPromise(Handle<JSObject> promise) {
+void Isolate::PushPromise(Handle<JSObject> promise,
+ Handle<JSFunction> function) {
ThreadLocalTop* tltop = thread_local_top();
PromiseOnStack* prev = tltop->promise_on_stack_;
- StackHandler* handler = StackHandler::FromAddress(Isolate::handler(tltop));
- Handle<JSObject> global_handle =
+ Handle<JSObject> global_promise =
Handle<JSObject>::cast(global_handles()->Create(*promise));
- tltop->promise_on_stack_ = new PromiseOnStack(handler, global_handle, prev);
+ Handle<JSFunction> global_function =
+ Handle<JSFunction>::cast(global_handles()->Create(*function));
+ tltop->promise_on_stack_ =
+ new PromiseOnStack(global_function, global_promise, prev);
}
@@ -1492,10 +1537,12 @@ void Isolate::PopPromise() {
ThreadLocalTop* tltop = thread_local_top();
if (tltop->promise_on_stack_ == NULL) return;
PromiseOnStack* prev = tltop->promise_on_stack_->prev();
- Handle<Object> global_handle = tltop->promise_on_stack_->promise();
+ Handle<Object> global_function = tltop->promise_on_stack_->function();
+ Handle<Object> global_promise = tltop->promise_on_stack_->promise();
delete tltop->promise_on_stack_;
tltop->promise_on_stack_ = prev;
- global_handles()->Destroy(global_handle.location());
+ global_handles()->Destroy(global_function.location());
+ global_handles()->Destroy(global_promise.location());
}
@@ -1503,17 +1550,21 @@ Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
Handle<Object> undefined = factory()->undefined_value();
ThreadLocalTop* tltop = thread_local_top();
if (tltop->promise_on_stack_ == NULL) return undefined;
- StackHandler* promise_try = tltop->promise_on_stack_->handler();
- // Find the top-most try-catch handler.
- StackHandler* handler = StackHandler::FromAddress(Isolate::handler(tltop));
- do {
- if (handler == promise_try) {
- return tltop->promise_on_stack_->promise();
+ Handle<JSFunction> promise_function = tltop->promise_on_stack_->function();
+ // Find the top-most try-catch or try-finally handler.
+ if (PredictExceptionCatcher() != CAUGHT_BY_JAVASCRIPT) return undefined;
+ for (JavaScriptFrameIterator it(this); !it.done(); it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ int stack_slots = 0; // The computed stack slot count is not used.
+ if (frame->LookupExceptionHandlerInTable(&stack_slots) > 0) {
+ // Throwing inside a Promise only leads to a reject if not caught by an
+ // inner try-catch or try-finally.
+ if (frame->function() == *promise_function) {
+ return tltop->promise_on_stack_->promise();
+ }
+ return undefined;
}
- handler = handler->next();
- // Throwing inside a Promise can be intercepted by an inner try-catch, so
- // we stop at the first try-catch handler.
- } while (handler != NULL && !handler->is_catch());
+ }
return undefined;
}
@@ -1757,11 +1808,6 @@ void Isolate::TearDown() {
thread_data_table_->RemoveAllThreads(this);
}
- if (serialize_partial_snapshot_cache_ != NULL) {
- delete[] serialize_partial_snapshot_cache_;
- serialize_partial_snapshot_cache_ = NULL;
- }
-
delete this;
// Restore the previous current isolate.
@@ -1775,6 +1821,16 @@ void Isolate::GlobalTearDown() {
}
+void Isolate::ClearSerializerData() {
+ delete external_reference_table_;
+ external_reference_table_ = NULL;
+ delete external_reference_map_;
+ external_reference_map_ = NULL;
+ delete root_index_map_;
+ root_index_map_ = NULL;
+}
+
+
void Isolate::Deinit() {
TRACE_ISOLATE(deinit);
@@ -1822,26 +1878,8 @@ void Isolate::Deinit() {
heap_profiler_ = NULL;
delete cpu_profiler_;
cpu_profiler_ = NULL;
-}
-
-
-void Isolate::PushToPartialSnapshotCache(Object* obj) {
- int length = serialize_partial_snapshot_cache_length();
- int capacity = serialize_partial_snapshot_cache_capacity();
- if (length >= capacity) {
- int new_capacity = static_cast<int>((capacity + 10) * 1.2);
- Object** new_array = new Object*[new_capacity];
- for (int i = 0; i < length; i++) {
- new_array[i] = serialize_partial_snapshot_cache()[i];
- }
- if (capacity != 0) delete[] serialize_partial_snapshot_cache();
- set_serialize_partial_snapshot_cache(new_array);
- set_serialize_partial_snapshot_cache_capacity(new_capacity);
- }
-
- serialize_partial_snapshot_cache()[length] = obj;
- set_serialize_partial_snapshot_cache_length(length + 1);
+ ClearSerializerData();
}
@@ -1930,9 +1968,6 @@ Isolate::~Isolate() {
delete string_stream_debug_object_cache_;
string_stream_debug_object_cache_ = NULL;
- delete external_reference_table_;
- external_reference_table_ = NULL;
-
delete random_number_generator_;
random_number_generator_ = NULL;
@@ -1948,22 +1983,20 @@ void Isolate::InitializeThreadLocal() {
bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
- DCHECK(has_pending_exception());
+ Object* exception = pending_exception();
- bool has_external_try_catch = HasExternalTryCatch();
- if (!has_external_try_catch) {
+ if (IsJavaScriptHandlerOnTop(exception)) {
thread_local_top_.external_caught_exception_ = false;
- return true;
+ return false;
}
- bool catchable_by_js = is_catchable_by_javascript(pending_exception());
- if (catchable_by_js && IsFinallyOnTop()) {
+ if (!IsExternalHandlerOnTop(exception)) {
thread_local_top_.external_caught_exception_ = false;
- return false;
+ return true;
}
thread_local_top_.external_caught_exception_ = true;
- if (thread_local_top_.pending_exception_ == heap()->termination_exception()) {
+ if (!is_catchable_by_javascript(exception)) {
try_catch_handler()->can_continue_ = false;
try_catch_handler()->has_terminated_ = true;
try_catch_handler()->exception_ = heap()->null_value();
@@ -1971,8 +2004,6 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
v8::TryCatch* handler = try_catch_handler();
DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
thread_local_top_.pending_message_obj_->IsTheHole());
- DCHECK(thread_local_top_.pending_message_script_->IsScript() ||
- thread_local_top_.pending_message_script_->IsTheHole());
handler->can_continue_ = true;
handler->has_terminated_ = false;
handler->exception_ = pending_exception();
@@ -1980,9 +2011,6 @@ bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
if (thread_local_top_.pending_message_obj_->IsTheHole()) return true;
handler->message_obj_ = thread_local_top_.pending_message_obj_;
- handler->message_script_ = thread_local_top_.pending_message_script_;
- handler->message_start_pos_ = thread_local_top_.pending_message_start_pos_;
- handler->message_end_pos_ = thread_local_top_.pending_message_end_pos_;
}
return true;
}
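
The rewritten propagation has exactly three outcomes. A self-contained
restatement of the decision (an editorial sketch, not V8 code):

    enum class Outcome {
      kDeferToJavaScript,      // JS handler on top: function returns false and
                               // the pending message is kept for later.
      kNoExternalHandler,      // returns true; nothing is copied anywhere.
      kExternalHandlerFilled,  // returns true; exception_, message_obj_ etc.
                               // of the v8::TryCatch are populated.
    };

    Outcome Classify(bool js_handler_on_top, bool external_handler_on_top) {
      if (js_handler_on_top) return Outcome::kDeferToJavaScript;
      if (!external_handler_on_top) return Outcome::kNoExternalHandler;
      return Outcome::kExternalHandlerFilled;
    }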
@@ -2088,7 +2116,7 @@ bool Isolate::Init(Deserializer* des) {
if (create_heap_objects) {
// Terminate the cache array with the sentinel so we can iterate.
- PushToPartialSnapshotCache(heap_.undefined_value());
+ partial_snapshot_cache_.Add(heap_.undefined_value());
}
InitializeThreadLocal();
@@ -2589,6 +2617,7 @@ void Isolate::CheckDetachedContextsAfterGC() {
int new_length = 0;
for (int i = 0; i < length; i += 2) {
int mark_sweeps = Smi::cast(detached_contexts->get(i))->value();
+ DCHECK(detached_contexts->get(i + 1)->IsWeakCell());
WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
if (!cell->cleared()) {
detached_contexts->set(new_length, Smi::FromInt(mark_sweeps + 1));
@@ -2602,6 +2631,7 @@ void Isolate::CheckDetachedContextsAfterGC() {
length - new_length, length);
for (int i = 0; i < new_length; i += 2) {
int mark_sweeps = Smi::cast(detached_contexts->get(i))->value();
+ DCHECK(detached_contexts->get(i + 1)->IsWeakCell());
WeakCell* cell = WeakCell::cast(detached_contexts->get(i + 1));
if (mark_sweeps > 3) {
PrintF("detached context 0x%p\n survived %d GCs (leak?)\n",
@@ -2612,8 +2642,8 @@ void Isolate::CheckDetachedContextsAfterGC() {
if (new_length == 0) {
heap()->set_detached_contexts(heap()->empty_fixed_array());
} else if (new_length < length) {
- heap()->RightTrimFixedArray<Heap::FROM_GC>(*detached_contexts,
- length - new_length);
+ heap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(*detached_contexts,
+ length - new_length);
}
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index fdd1832888..80c3daea74 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -137,20 +137,14 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
#define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T) \
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
-#define THROW_NEW_ERROR(isolate, call, T) \
- do { \
- Handle<Object> __error__; \
- ASSIGN_RETURN_ON_EXCEPTION(isolate, __error__, isolate->factory()->call, \
- T); \
- return isolate->Throw<T>(__error__); \
+#define THROW_NEW_ERROR(isolate, call, T) \
+ do { \
+ return isolate->Throw<T>(isolate->factory()->call); \
} while (false)
-#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
- do { \
- Handle<Object> __error__; \
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, __error__, \
- isolate->factory()->call); \
- return isolate->Throw(*__error__); \
+#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call) \
+ do { \
+ return isolate->Throw(*isolate->factory()->call); \
} while (false)
#define RETURN_ON_EXCEPTION_VALUE(isolate, call, value) \
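
For orientation, a hypothetical call site of the simplified THROW_NEW_ERROR;
the factory call mirrors the NewTypeError usage visible in the
json-stringifier.h hunk further down, everything else is illustrative:

    MaybeHandle<Object> CheckNonNegative(Isolate* isolate, int value) {
      if (value < 0) {
        // Now a single statement:
        //   return isolate->Throw<Object>(isolate->factory()->NewTypeError(...));
        THROW_NEW_ERROR(
            isolate,
            NewTypeError("invalid_argument", HandleVector<Object>(NULL, 0)),
            Object);
      }
      return isolate->factory()->undefined_value();
    }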
@@ -174,6 +168,11 @@ typedef ZoneList<Handle<Object> > ZoneObjectList;
C(CFunction, c_function) \
C(Context, context) \
C(PendingException, pending_exception) \
+ C(PendingHandlerContext, pending_handler_context) \
+ C(PendingHandlerCode, pending_handler_code) \
+ C(PendingHandlerOffset, pending_handler_offset) \
+ C(PendingHandlerFP, pending_handler_fp) \
+ C(PendingHandlerSP, pending_handler_sp) \
C(ExternalCaughtException, external_caught_exception) \
C(JSEntrySP, js_entry_sp)
@@ -274,23 +273,28 @@ class ThreadLocalTop BASE_EMBEDDED {
Context* context_;
ThreadId thread_id_;
Object* pending_exception_;
- bool has_pending_message_;
+
+ // Communication channel between Isolate::FindHandler and the CEntryStub.
+ Context* pending_handler_context_;
+ Code* pending_handler_code_;
+ intptr_t pending_handler_offset_;
+ Address pending_handler_fp_;
+ Address pending_handler_sp_;
+
+ // Communication channel between Isolate::Throw and message consumers.
bool rethrowing_message_;
Object* pending_message_obj_;
- Object* pending_message_script_;
- int pending_message_start_pos_;
- int pending_message_end_pos_;
+
// Use a separate value for scheduled exceptions to preserve the
// invariants that hold about pending_exception. We may want to
// unify them later.
Object* scheduled_exception_;
bool external_caught_exception_;
SaveContext* save_context_;
- v8::TryCatch* catcher_;
// Stack.
Address c_entry_fp_; // the frame pointer of the top c entry frame
- Address handler_; // try-blocks are chained through the stack
+ Address handler_; // try-blocks are chained through the stack
Address c_function_; // C function that was called at c entry.
// Throwing an exception may cause a Promise rejection. For this purpose
@@ -307,9 +311,6 @@ class ThreadLocalTop BASE_EMBEDDED {
ExternalCallbackScope* external_callback_scope_;
StateTag current_vm_state_;
- // Generated code scratch locations.
- int32_t formal_count_;
-
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
@@ -361,10 +362,6 @@ class ThreadLocalTop BASE_EMBEDDED {
typedef List<HeapObject*> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
- /* SerializerDeserializer state. */ \
- V(int, serialize_partial_snapshot_cache_length, 0) \
- V(int, serialize_partial_snapshot_cache_capacity, 0) \
- V(Object**, serialize_partial_snapshot_cache, NULL) \
/* Assembler state. */ \
V(FatalErrorCallback, exception_behavior, NULL) \
V(LogEventCallback, event_logger, NULL) \
@@ -379,8 +376,9 @@ typedef List<HeapObject*> DebugObjectCache;
V(Relocatable*, relocatable_top, NULL) \
V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
V(Object*, string_stream_current_security_token, NULL) \
- /* Serializer state. */ \
V(ExternalReferenceTable*, external_reference_table, NULL) \
+ V(HashMap*, external_reference_map, NULL) \
+ V(HashMap*, root_index_map, NULL) \
V(int, pending_microtask_count, 0) \
V(bool, autorun_microtasks, true) \
V(HStatistics*, hstatistics, NULL) \
@@ -391,12 +389,16 @@ typedef List<HeapObject*> DebugObjectCache;
V(int, max_available_threads, 0) \
V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu) \
V(PromiseRejectCallback, promise_reject_callback, NULL) \
+ V(const v8::StartupData*, snapshot_blob, NULL) \
ISOLATE_INIT_SIMULATOR_LIST(V)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
inline void set_##name(type v) { thread_local_top_.name##_ = v; } \
inline type name() const { return thread_local_top_.name##_; }
+#define THREAD_LOCAL_TOP_ADDRESS(type, name) \
+ type* name##_address() { return &thread_local_top_.name##_; }
+
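+// For concreteness: THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
+// expands, inside class Isolate below, to exactly the accessor it replaces:
+//
+//   Object** pending_exception_address() {
+//     return &thread_local_top_.pending_exception_;
+//   }
+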
class Isolate {
// These forward declarations are required to make the friend declarations in
@@ -526,6 +528,8 @@ class Isolate {
static void GlobalTearDown();
+ void ClearSerializerData();
+
// Find the PerThread for this particular (isolate, thread) combination
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThisThread();
@@ -583,51 +587,37 @@ class Isolate {
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
- Object** pending_exception_address() {
- return &thread_local_top_.pending_exception_;
- }
+ THREAD_LOCAL_TOP_ADDRESS(Object*, pending_exception)
bool has_pending_exception() {
DCHECK(!thread_local_top_.pending_exception_->IsException());
return !thread_local_top_.pending_exception_->IsTheHole();
}
+ THREAD_LOCAL_TOP_ADDRESS(Context*, pending_handler_context)
+ THREAD_LOCAL_TOP_ADDRESS(Code*, pending_handler_code)
+ THREAD_LOCAL_TOP_ADDRESS(intptr_t, pending_handler_offset)
+ THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_fp)
+ THREAD_LOCAL_TOP_ADDRESS(Address, pending_handler_sp)
+
THREAD_LOCAL_TOP_ACCESSOR(bool, external_caught_exception)
void clear_pending_message() {
- thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
- thread_local_top_.pending_message_script_ = heap_.the_hole_value();
}
v8::TryCatch* try_catch_handler() {
return thread_local_top_.try_catch_handler();
}
- Address try_catch_handler_address() {
- return thread_local_top_.try_catch_handler_address();
- }
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
- THREAD_LOCAL_TOP_ACCESSOR(v8::TryCatch*, catcher)
-
- Object** scheduled_exception_address() {
- return &thread_local_top_.scheduled_exception_;
- }
+ THREAD_LOCAL_TOP_ADDRESS(Object*, scheduled_exception)
Address pending_message_obj_address() {
return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
}
- Address has_pending_message_address() {
- return reinterpret_cast<Address>(&thread_local_top_.has_pending_message_);
- }
-
- Address pending_message_script_address() {
- return reinterpret_cast<Address>(
- &thread_local_top_.pending_message_script_);
- }
-
Object* scheduled_exception() {
DCHECK(has_scheduled_exception());
DCHECK(!thread_local_top_.scheduled_exception_->IsException());
@@ -642,16 +632,13 @@ class Isolate {
thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
}
- bool HasExternalTryCatch();
- bool IsFinallyOnTop();
+ bool IsJavaScriptHandlerOnTop(Object* exception);
+ bool IsExternalHandlerOnTop(Object* exception);
bool is_catchable_by_javascript(Object* exception) {
return exception != heap()->termination_exception();
}
- // Serializer.
- void PushToPartialSnapshotCache(Object* obj);
-
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
return thread->c_entry_fp_;
@@ -675,9 +662,6 @@ class Isolate {
return &thread_local_top_.js_entry_sp_;
}
- // Generated code scratch locations.
- void* formal_count_address() { return &thread_local_top_.formal_count_; }
-
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
Handle<GlobalObject> global_object() {
@@ -703,29 +687,25 @@ class Isolate {
bool OptionalRescheduleException(bool is_bottom_call);
// Push and pop a promise and the current try-catch handler.
- void PushPromise(Handle<JSObject> promise);
+ void PushPromise(Handle<JSObject> promise, Handle<JSFunction> function);
void PopPromise();
Handle<Object> GetPromiseOnStackOnThrow();
class ExceptionScope {
public:
- explicit ExceptionScope(Isolate* isolate) :
- // Scope currently can only be used for regular exceptions,
- // not termination exception.
- isolate_(isolate),
- pending_exception_(isolate_->pending_exception(), isolate_),
- catcher_(isolate_->catcher())
- { }
+ // Scope currently can only be used for regular exceptions,
+ // not termination exception.
+ explicit ExceptionScope(Isolate* isolate)
+ : isolate_(isolate),
+ pending_exception_(isolate_->pending_exception(), isolate_) {}
~ExceptionScope() {
- isolate_->set_catcher(catcher_);
isolate_->set_pending_exception(*pending_exception_);
}
private:
Isolate* isolate_;
Handle<Object> pending_exception_;
- v8::TryCatch* catcher_;
};
void SetCaptureStackTraceForUncaughtExceptions(
@@ -757,21 +737,17 @@ class Isolate {
// the result is false, the pending exception is guaranteed to be
// set.
- bool MayNamedAccess(Handle<JSObject> receiver,
- Handle<Object> key,
- v8::AccessType type);
- bool MayIndexedAccess(Handle<JSObject> receiver,
- uint32_t index,
- v8::AccessType type);
+ bool MayAccess(Handle<JSObject> receiver);
bool IsInternallyUsedPropertyName(Handle<Object> name);
bool IsInternallyUsedPropertyName(Object* name);
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
- void ReportFailedAccessCheck(Handle<JSObject> receiver, v8::AccessType type);
+ void ReportFailedAccessCheck(Handle<JSObject> receiver);
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
Object* Throw(Object* exception, MessageLocation* location = NULL);
+ Object* ThrowIllegalOperation();
template <typename T>
MUST_USE_RESULT MaybeHandle<T> Throw(Handle<Object> exception,
@@ -780,10 +756,21 @@ class Isolate {
return MaybeHandle<T>();
}
- // Re-throw an exception. This involves no error reporting since
- // error reporting was handled when the exception was thrown
- // originally.
+ // Re-throw an exception. This involves no error reporting since error
+ // reporting was handled when the exception was thrown originally.
Object* ReThrow(Object* exception);
+
+ // Find the correct handler for the current pending exception. This also
+ // clears and returns the current pending exception.
+ Object* FindHandler();
+
+ // Tries to predict whether an exception will be caught. Note that this can
+ // only produce an estimate, because it is undecidable whether a finally
+ // clause will consume or re-throw an exception. We conservatively assume any
+ // finally clause will behave as if the exception were consumed.
+ enum CatchType { NOT_CAUGHT, CAUGHT_BY_JAVASCRIPT, CAUGHT_BY_EXTERNAL };
+ CatchType PredictExceptionCatcher();
+
void ScheduleThrow(Object* exception);
// Re-set pending message, script and positions reported to the TryCatch
// back to the TLS for re-use when rethrowing.
@@ -793,15 +780,9 @@ class Isolate {
void ReportPendingMessages();
// Return pending location if any or unfilled structure.
MessageLocation GetMessageLocation();
- Object* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
Object* PromoteScheduledException();
- void DoThrow(Object* exception, MessageLocation* location);
- // Checks if exception should be reported and finds out if it's
- // caught externally.
- bool ShouldReportException(bool* can_be_caught_externally,
- bool catchable_by_javascript);
// Attempts to compute the current source location, storing the
// result in the target out parameter.
@@ -828,7 +809,6 @@ class Isolate {
char* Iterate(ObjectVisitor* v, char* t);
void IterateThread(ThreadVisitor* v, char* t);
-
// Returns the current native context.
Handle<Context> native_context();
@@ -1006,6 +986,7 @@ class Isolate {
}
bool serializer_enabled() const { return serializer_enabled_; }
+ bool snapshot_available() const { return snapshot_blob_ != NULL; }
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
@@ -1139,9 +1120,12 @@ class Isolate {
void AddDetachedContext(Handle<Context> context);
void CheckDetachedContextsAfterGC();
- private:
+ List<Object*>* partial_snapshot_cache() { return &partial_snapshot_cache_; }
+
+ protected:
explicit Isolate(bool enable_serializer);
+ private:
friend struct GlobalState;
friend struct InitializeGlobalState;
@@ -1360,6 +1344,7 @@ class Isolate {
v8::Isolate::UseCounterCallback use_counter_callback_;
BasicBlockProfiler* basic_block_profiler_;
+ List<Object*> partial_snapshot_cache_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
@@ -1374,6 +1359,7 @@ class Isolate {
friend class v8::Isolate;
friend class v8::Locker;
friend class v8::Unlocker;
+ friend v8::StartupData v8::V8::CreateSnapshotDataBlob(const char*);
DISALLOW_COPY_AND_ASSIGN(Isolate);
};
@@ -1385,15 +1371,15 @@ class Isolate {
class PromiseOnStack {
public:
- PromiseOnStack(StackHandler* handler, Handle<JSObject> promise,
+ PromiseOnStack(Handle<JSFunction> function, Handle<JSObject> promise,
PromiseOnStack* prev)
- : handler_(handler), promise_(promise), prev_(prev) {}
- StackHandler* handler() { return handler_; }
+ : function_(function), promise_(promise), prev_(prev) {}
+ Handle<JSFunction> function() { return function_; }
Handle<JSObject> promise() { return promise_; }
PromiseOnStack* prev() { return prev_; }
private:
- StackHandler* handler_;
+ Handle<JSFunction> function_;
Handle<JSObject> promise_;
PromiseOnStack* prev_;
};
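
Callers must keep the push/pop paired now that both handles are duplicated
into global handles. A sketch of the contract (hypothetical call site; the
real callers live in the debug/promise machinery, not shown in this patch):

    isolate->PushPromise(promise, handler_function);  // two global handles
    // ... invoke the tracked function; a throw inside it can be attributed
    // to |promise| by GetPromiseOnStackOnThrow() ...
    isolate->PopPromise();  // destroys both global handles, restores prev_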
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 9a22738e98..12eea78fca 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -108,8 +108,9 @@ class JsonParser BASE_EMBEDDED {
const uint8_t* expected_chars = content.ToOneByteVector().start();
for (int i = 0; i < length; i++) {
uint8_t c0 = input_chars[i];
- // The expected string has to be free of \, " and characters < 0x20.
- if (c0 != expected_chars[i]) return false;
+ if (c0 != expected_chars[i] || c0 == '"' || c0 < 0x20 || c0 == '\\') {
+ return false;
+ }
}
if (input_chars[length] == '"') {
position_ = position_ + length + 1;
@@ -172,7 +173,7 @@ class JsonParser BASE_EMBEDDED {
inline Factory* factory() { return factory_; }
inline Handle<JSFunction> object_constructor() { return object_constructor_; }
- static const int kInitialSpecialStringLength = 1024;
+ static const int kInitialSpecialStringLength = 32;
static const int kPretenureTreshold = 100 * 1024;
@@ -244,9 +245,7 @@ MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
MessageLocation location(factory->NewScript(source_),
position_,
position_ + 1);
- Handle<Object> error;
- ASSIGN_RETURN_ON_EXCEPTION(isolate(), error,
- factory->NewSyntaxError(message, array), Object);
+ Handle<Object> error = factory->NewSyntaxError(message, array);
return isolate()->template Throw<Object>(error, &location);
}
return result;
@@ -262,6 +261,12 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
return Handle<Object>::null();
}
+ if (isolate_->stack_guard()->InterruptRequested()) {
+ ExecutionAccess access(isolate_);
+ // Avoid blocking GC in a long-running parser (v8:3974).
+ isolate_->stack_guard()->CheckAndHandleGCInterrupt();
+ }
+
if (c0_ == '"') return ParseJsonString();
if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
if (c0_ == '{') return ParseJsonObject();
@@ -301,6 +306,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
Handle<JSObject> json_object =
factory()->NewJSObject(object_constructor(), pretenure_);
Handle<Map> map(json_object->map());
+ int descriptor = 0;
ZoneList<Handle<Object> > properties(8, zone());
DCHECK_EQ(c0_, '{');
@@ -315,7 +321,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
Advance();
uint32_t index = 0;
- if (c0_ >= '0' && c0_ <= '9') {
+ if (IsDecimalDigit(c0_)) {
// Maybe an array index, try to parse it.
if (c0_ == '0') {
// With a leading zero, the string has to be "0" only to be an index.
@@ -326,7 +332,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
if (index > 429496729U - ((d > 5) ? 1 : 0)) break;
index = (index * 10) + d;
Advance();
- } while (c0_ >= '0' && c0_ <= '9');
+ } while (IsDecimalDigit(c0_));
}
if (c0_ == '"') {
@@ -360,19 +366,19 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
bool follow_expected = false;
Handle<Map> target;
if (seq_one_byte) {
- key = Map::ExpectedTransitionKey(map);
+ key = TransitionArray::ExpectedTransitionKey(map);
follow_expected = !key.is_null() && ParseJsonString(key);
}
// If the expected transition hits, follow it.
if (follow_expected) {
- target = Map::ExpectedTransitionTarget(map);
+ target = TransitionArray::ExpectedTransitionTarget(map);
} else {
// If the expected transition failed, parse an internalized string and
// try to find a matching transition.
key = ParseJsonInternalizedString();
if (key.is_null()) return ReportUnexpectedCharacter();
- target = Map::FindTransitionToField(map, key);
+ target = TransitionArray::FindTransitionToField(map, key);
// If a transition was found, follow it and continue.
transitioning = !target.is_null();
}
@@ -383,18 +389,15 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
if (value.is_null()) return ReportUnexpectedCharacter();
if (transitioning) {
- int descriptor = map->NumberOfOwnDescriptors();
PropertyDetails details =
target->instance_descriptors()->GetDetails(descriptor);
Representation expected_representation = details.representation();
if (value->FitsRepresentation(expected_representation)) {
- if (expected_representation.IsDouble()) {
- value = Object::NewStorageFor(isolate(), value,
- expected_representation);
- } else if (expected_representation.IsHeapObject() &&
- !target->instance_descriptors()->GetFieldType(
- descriptor)->NowContains(value)) {
+ if (expected_representation.IsHeapObject() &&
+ !target->instance_descriptors()
+ ->GetFieldType(descriptor)
+ ->NowContains(value)) {
Handle<HeapType> value_type(value->OptimalType(
isolate(), expected_representation));
Map::GeneralizeFieldType(target, descriptor,
@@ -404,6 +407,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
descriptor)->NowContains(value));
properties.Add(value, zone());
map = target;
+ descriptor++;
continue;
} else {
transitioning = false;
@@ -445,30 +449,11 @@ void JsonParser<seq_one_byte>::CommitStateToJsonObject(
DCHECK(!json_object->map()->is_dictionary_map());
DisallowHeapAllocation no_gc;
- Factory* factory = isolate()->factory();
- // If the |json_object|'s map is exactly the same as |map| then the
- // |properties| values correspond to the |map| and nothing more has to be
- // done. But if the |json_object|'s map is different then we have to
- // iterate descriptors to ensure that properties still correspond to the
- // map.
- bool slow_case = json_object->map() != *map;
- DescriptorArray* descriptors = NULL;
int length = properties->length();
- if (slow_case) {
- descriptors = json_object->map()->instance_descriptors();
- DCHECK(json_object->map()->NumberOfOwnDescriptors() == length);
- }
for (int i = 0; i < length; i++) {
Handle<Object> value = (*properties)[i];
- if (slow_case && value->IsMutableHeapNumber() &&
- !descriptors->GetDetails(i).representation().IsDouble()) {
- // Turn mutable heap numbers into immutable if the field representation
- // is not double.
- HeapNumber::cast(*value)->set_map(*factory->heap_number_map());
- }
- FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
- json_object->FastPropertyAtPut(index, *value);
+ json_object->WriteToField(i, *value);
}
}
@@ -516,7 +501,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
Advance();
// Prefix zero is only allowed if it's the only digit before
// a decimal point or exponent.
- if ('0' <= c0_ && c0_ <= '9') return ReportUnexpectedCharacter();
+ if (IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
} else {
int i = 0;
int digits = 0;
@@ -525,7 +510,7 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
i = i * 10 + c0_ - '0';
digits++;
Advance();
- } while (c0_ >= '0' && c0_ <= '9');
+ } while (IsDecimalDigit(c0_));
if (c0_ != '.' && c0_ != 'e' && c0_ != 'E' && digits < 10) {
SkipWhitespace();
return Handle<Smi>(Smi::FromInt((negative ? -i : i)), isolate());
@@ -533,18 +518,18 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
}
if (c0_ == '.') {
Advance();
- if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
+ if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
do {
Advance();
- } while (c0_ >= '0' && c0_ <= '9');
+ } while (IsDecimalDigit(c0_));
}
if (AsciiAlphaToLower(c0_) == 'e') {
Advance();
if (c0_ == '-' || c0_ == '+') Advance();
- if (c0_ < '0' || c0_ > '9') return ReportUnexpectedCharacter();
+ if (!IsDecimalDigit(c0_)) return ReportUnexpectedCharacter();
do {
Advance();
- } while (c0_ >= '0' && c0_ <= '9');
+ } while (IsDecimalDigit(c0_));
}
int length = position_ - beg_pos;
double number;
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index d7caefc9e9..fabc8f3929 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -272,10 +272,9 @@ BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
for (int i = 0; i < length; i++) {
if (elements->get(i) == *object) {
AllowHeapAllocation allow_to_return_error;
- Handle<Object> error;
- MaybeHandle<Object> maybe_error = factory()->NewTypeError(
+ Handle<Object> error = factory()->NewTypeError(
"circular_structure", HandleVector<Object>(NULL, 0));
- if (maybe_error.ToHandle(&error)) isolate_->Throw(*error);
+ isolate_->Throw(*error);
return EXCEPTION;
}
}
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index e2b7dc816b..90ffa25131 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -178,7 +178,7 @@ function JSONStringify(value, replacer, space) {
}
var gap;
if (IS_NUMBER(space)) {
- space = MathMax(0, MathMin(ToInteger(space), 10));
+ space = $max(0, $min(ToInteger(space), 10));
gap = %_SubString(" ", 0, space);
} else if (IS_STRING(space)) {
if (space.length > 10) {
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index bb7ad60414..03398cf693 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -360,10 +360,8 @@ static void CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
elements->set(0, re->Pattern());
elements->set(1, *error_message);
Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> error;
- MaybeHandle<Object> maybe_error =
- factory->NewSyntaxError("malformed_regexp", array);
- if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+ Handle<Object> error = factory->NewSyntaxError("malformed_regexp", array);
+ isolate->Throw(*error);
}
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
index ceee09a810..ba76704d5f 100644
--- a/deps/v8/src/layout-descriptor-inl.h
+++ b/deps/v8/src/layout-descriptor-inl.h
@@ -20,18 +20,7 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
// The whole bit vector fits into a smi.
return handle(LayoutDescriptor::FromSmi(Smi::FromInt(0)), isolate);
}
-
- length = (length + kNumberOfBits - 1) / kNumberOfBits;
- DCHECK(length > 0);
-
- if (SmiValuesAre32Bits() && (length & 1)) {
- // On 64-bit systems if the length is odd then the half-word space would be
- // lost anyway (due to alignment and the fact that we are allocating
- // uint32-typed array), so we increase the length of allocated array
- // to utilize that "lost" space which could also help to avoid layout
- // descriptor reallocations.
- ++length;
- }
+ length = GetSlowModeBackingStoreLength(length);
return Handle<LayoutDescriptor>::cast(
isolate->factory()->NewFixedTypedArray(length, kExternalUint32Array));
}
@@ -154,6 +143,77 @@ LayoutDescriptor* LayoutDescriptor::cast_gc_safe(Object* object) {
}
+int LayoutDescriptor::GetSlowModeBackingStoreLength(int length) {
+ length = (length + kNumberOfBits - 1) / kNumberOfBits;
+ DCHECK_LT(0, length);
+
+ if (SmiValuesAre32Bits() && (length & 1)) {
+ // On 64-bit systems, if the length is odd, the half-word space would be
+ // lost anyway (due to alignment and the fact that we are allocating a
+ // uint32-typed array), so we increase the length of the allocated array
+ // to utilize that "lost" space, which can also help to avoid layout
+ // descriptor reallocations.
+ ++length;
+ }
+ return length;
+}
+
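+// Worked example, assuming kNumberOfBits == 32 (one tagged/raw bit per field,
+// packed into uint32 backing-store elements):
+//   40 fields -> (40 + 31) / 32 = 2 words;
+//   70 fields -> (70 + 31) / 32 = 3 words, then rounded up to 4 on 64-bit
+//   targets with 32-bit Smis, per the padding rationale above.
+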
+
+int LayoutDescriptor::CalculateCapacity(Map* map, DescriptorArray* descriptors,
+ int num_descriptors) {
+ int inobject_properties = map->inobject_properties();
+ if (inobject_properties == 0) return 0;
+
+ DCHECK_LE(num_descriptors, descriptors->number_of_descriptors());
+
+ int layout_descriptor_length;
+ const int kMaxWordsPerField = kDoubleSize / kPointerSize;
+
+ if (num_descriptors <= kSmiValueSize / kMaxWordsPerField) {
+ // Even in the "worst" case (all fields are doubles) it would fit into
+ // a Smi, so no need to calculate length.
+ layout_descriptor_length = kSmiValueSize;
+
+ } else {
+ layout_descriptor_length = 0;
+
+ for (int i = 0; i < num_descriptors; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (!InobjectUnboxedField(inobject_properties, details)) continue;
+ int field_index = details.field_index();
+ int field_width_in_words = details.field_width_in_words();
+ layout_descriptor_length =
+ Max(layout_descriptor_length, field_index + field_width_in_words);
+ }
+ }
+ layout_descriptor_length = Min(layout_descriptor_length, inobject_properties);
+ return layout_descriptor_length;
+}
+
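+// Illustrative constants (target-dependent, an editorial note):
+// kMaxWordsPerField = kDoubleSize / kPointerSize is 1 on 64-bit and 2 on
+// 32-bit targets; with kSmiValueSize of 32 (resp. 31), the early-out above
+// covers up to 32 (resp. 15) descriptors, so even an all-double map then
+// fits into a Smi-encoded layout descriptor.
+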
+
+LayoutDescriptor* LayoutDescriptor::Initialize(
+ LayoutDescriptor* layout_descriptor, Map* map, DescriptorArray* descriptors,
+ int num_descriptors) {
+ DisallowHeapAllocation no_allocation;
+ int inobject_properties = map->inobject_properties();
+
+ for (int i = 0; i < num_descriptors; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (!InobjectUnboxedField(inobject_properties, details)) {
+ DCHECK(details.location() != kField ||
+ layout_descriptor->IsTagged(details.field_index()));
+ continue;
+ }
+ int field_index = details.field_index();
+ layout_descriptor = layout_descriptor->SetRawData(field_index);
+ if (details.field_width_in_words() > 1) {
+ layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
+ }
+ }
+ return layout_descriptor;
+}
+
+
// InobjectPropertiesHelper is a helper class for querying whether inobject
// property at offset is Double or not.
LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
index 121836c173..4bb48c0585 100644
--- a/deps/v8/src/layout-descriptor.cc
+++ b/deps/v8/src/layout-descriptor.cc
@@ -19,55 +19,22 @@ Handle<LayoutDescriptor> LayoutDescriptor::New(
Isolate* isolate = descriptors->GetIsolate();
if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);
- int inobject_properties = map->inobject_properties();
- if (inobject_properties == 0) return handle(FastPointerLayout(), isolate);
+ int layout_descriptor_length =
+ CalculateCapacity(*map, *descriptors, num_descriptors);
- DCHECK(num_descriptors <= descriptors->number_of_descriptors());
-
- int layout_descriptor_length;
- const int kMaxWordsPerField = kDoubleSize / kPointerSize;
-
- if (num_descriptors <= kSmiValueSize / kMaxWordsPerField) {
- // Even in the "worst" case (all fields are doubles) it would fit into
- // a Smi, so no need to calculate length.
- layout_descriptor_length = kSmiValueSize;
-
- } else {
- layout_descriptor_length = 0;
-
- for (int i = 0; i < num_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (!InobjectUnboxedField(inobject_properties, details)) continue;
- int field_index = details.field_index();
- int field_width_in_words = details.field_width_in_words();
- layout_descriptor_length =
- Max(layout_descriptor_length, field_index + field_width_in_words);
- }
-
- if (layout_descriptor_length == 0) {
- // No double fields were found, use fast pointer layout.
- return handle(FastPointerLayout(), isolate);
- }
+ if (layout_descriptor_length == 0) {
+ // No double fields were found, use fast pointer layout.
+ return handle(FastPointerLayout(), isolate);
}
- layout_descriptor_length = Min(layout_descriptor_length, inobject_properties);
// Initially, layout descriptor corresponds to an object with all fields
// tagged.
Handle<LayoutDescriptor> layout_descriptor_handle =
LayoutDescriptor::New(isolate, layout_descriptor_length);
- DisallowHeapAllocation no_allocation;
- LayoutDescriptor* layout_descriptor = *layout_descriptor_handle;
-
- for (int i = 0; i < num_descriptors; i++) {
- PropertyDetails details = descriptors->GetDetails(i);
- if (!InobjectUnboxedField(inobject_properties, details)) continue;
- int field_index = details.field_index();
- layout_descriptor = layout_descriptor->SetRawData(field_index);
- if (details.field_width_in_words() > 1) {
- layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
- }
- }
+ LayoutDescriptor* layout_descriptor = Initialize(
+ *layout_descriptor_handle, *map, *descriptors, num_descriptors);
+
return handle(layout_descriptor, isolate);
}
@@ -258,13 +225,44 @@ bool LayoutDescriptorHelper::IsTagged(
}
-bool LayoutDescriptor::IsConsistentWithMap(Map* map) {
+LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
+ DescriptorArray* descriptors,
+ int num_descriptors) {
+ DisallowHeapAllocation no_allocation;
+ // Fast mode descriptors are never shared and therefore always fully
+ // correspond to their map.
+ if (!IsSlowLayout()) return this;
+
+ int layout_descriptor_length =
+ CalculateCapacity(map, descriptors, num_descriptors);
+ // It must not become a fast-mode descriptor here: otherwise it would already
+ // have to be the fast pointer layout descriptor, yet it is in slow mode now.
+ DCHECK_LT(kSmiValueSize, layout_descriptor_length);
+
+ // Trim, clean and reinitialize this slow-mode layout descriptor.
+ int array_length = GetSlowModeBackingStoreLength(layout_descriptor_length);
+ int current_length = length();
+ if (current_length != array_length) {
+ DCHECK_LT(array_length, current_length);
+ int delta = current_length - array_length;
+ heap->RightTrimFixedArray<Heap::FROM_GC>(this, delta);
+ }
+ memset(DataPtr(), 0, DataSize());
+ LayoutDescriptor* layout_descriptor =
+ Initialize(this, map, descriptors, num_descriptors);
+ DCHECK_EQ(this, layout_descriptor);
+ return layout_descriptor;
+}
+
+
+bool LayoutDescriptor::IsConsistentWithMap(Map* map, bool check_tail) {
if (FLAG_unbox_double_fields) {
DescriptorArray* descriptors = map->instance_descriptors();
int nof_descriptors = map->NumberOfOwnDescriptors();
+ int last_field_index = 0;
for (int i = 0; i < nof_descriptors; i++) {
PropertyDetails details = descriptors->GetDetails(i);
- if (details.type() != DATA) continue;
+ if (details.location() != kField) continue;
FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
bool tagged_expected =
!field_index.is_inobject() || !details.representation().IsDouble();
@@ -273,6 +271,15 @@ bool LayoutDescriptor::IsConsistentWithMap(Map* map) {
DCHECK_EQ(tagged_expected, tagged_actual);
if (tagged_actual != tagged_expected) return false;
}
+ last_field_index =
+ Max(last_field_index,
+ details.field_index() + details.field_width_in_words());
+ }
+ if (check_tail) {
+ int n = capacity();
+ for (int i = last_field_index; i < n; i++) {
+ DCHECK(IsTagged(i));
+ }
}
}
return true;
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
index 8f2942c0eb..0a14f53198 100644
--- a/deps/v8/src/layout-descriptor.h
+++ b/deps/v8/src/layout-descriptor.h
@@ -70,7 +70,14 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
V8_INLINE static LayoutDescriptor* FastPointerLayout();
// Check that this layout descriptor corresponds to given map.
- bool IsConsistentWithMap(Map* map);
+ bool IsConsistentWithMap(Map* map, bool check_tail = false);
+
+ // Trims this layout descriptor to the given number of descriptors. This
+ // happens only when the corresponding descriptors array is trimmed.
+ // Only slow-mode layout descriptors are actually trimmed; fast-mode
+ // descriptors always fully correspond to their map and are left as-is.
+ LayoutDescriptor* Trim(Heap* heap, Map* map, DescriptorArray* descriptors,
+ int num_descriptors);
#ifdef OBJECT_PRINT
// For our gdb macros, we should perhaps change these in the future.
@@ -94,6 +101,21 @@ class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
V8_INLINE static bool InobjectUnboxedField(int inobject_properties,
PropertyDetails details);
+ // Calculates the minimal layout descriptor capacity required for the given
+ // |map|, |descriptors| and |num_descriptors|.
+ V8_INLINE static int CalculateCapacity(Map* map, DescriptorArray* descriptors,
+ int num_descriptors);
+
+ // Calculates the length of the slow-mode backing store array for the given
+ // layout descriptor length.
+ V8_INLINE static int GetSlowModeBackingStoreLength(int length);
+
+ // Fills in the clean |layout_descriptor| according to the given |map|,
+ // |descriptors| and |num_descriptors|.
+ V8_INLINE static LayoutDescriptor* Initialize(
+ LayoutDescriptor* layout_descriptor, Map* map,
+ DescriptorArray* descriptors, int num_descriptors);
+
static Handle<LayoutDescriptor> EnsureCapacity(
Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
int new_capacity);
diff --git a/deps/v8/src/lithium-codegen.cc b/deps/v8/src/lithium-codegen.cc
index 242db222f2..89df2ce845 100644
--- a/deps/v8/src/lithium-codegen.cc
+++ b/deps/v8/src/lithium-codegen.cc
@@ -153,7 +153,7 @@ void LCodeGenBase::Comment(const char* format, ...) {
void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
- masm()->RecordDeoptReason(deopt_info.deopt_reason, deopt_info.raw_position);
+ masm()->RecordDeoptReason(deopt_info.deopt_reason, deopt_info.position);
}
@@ -189,4 +189,13 @@ void LCodeGenBase::AddStabilityDependency(Handle<Map> map) {
chunk_->AddStabilityDependency(map);
}
+
+Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
+ LInstruction* instr, Deoptimizer::DeoptReason deopt_reason) {
+ Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
+ instr->Mnemonic(), deopt_reason);
+ HEnterInlined* enter_inlined = instr->environment()->entry();
+ deopt_info.inlining_id = enter_inlined ? enter_inlined->inlining_id() : 0;
+ return deopt_info;
+}
} } // namespace v8::internal
diff --git a/deps/v8/src/lithium-codegen.h b/deps/v8/src/lithium-codegen.h
index 17bf78cac0..80afbaf235 100644
--- a/deps/v8/src/lithium-codegen.h
+++ b/deps/v8/src/lithium-codegen.h
@@ -36,6 +36,8 @@ class LCodeGenBase BASE_EMBEDDED {
void FPRINTF_CHECKING Comment(const char* format, ...);
void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
+ static Deoptimizer::DeoptInfo MakeDeoptInfo(
+ LInstruction* instr, Deoptimizer::DeoptReason deopt_reason);
bool GenerateBody();
virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc
index c15e9dbee2..a2563d88d6 100644
--- a/deps/v8/src/lithium.cc
+++ b/deps/v8/src/lithium.cc
@@ -7,7 +7,6 @@
#include "src/v8.h"
#include "src/scopes.h"
-#include "src/serialize.h"
#if V8_TARGET_ARCH_IA32
#include "src/ia32/lithium-ia32.h" // NOLINT
@@ -448,6 +447,10 @@ void LChunk::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) const {
}
}
for (int i = 0; i < maps.length(); i++) {
+ if (maps.at(i)->dependent_code()->number_of_entries(
+ DependentCode::kWeakCodeGroup) == 0) {
+ isolate()->heap()->AddRetainedMap(maps.at(i));
+ }
Map::AddDependentCode(maps.at(i), DependentCode::kWeakCodeGroup, code);
}
for (int i = 0; i < objects.length(); i++) {
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index e880cab7b7..c03d8d3e93 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -995,9 +995,6 @@ class LiteralFixer {
Handle<SharedFunctionInfo> shared_info,
Isolate* isolate) {
int new_literal_count = compile_info_wrapper->GetLiteralCount();
- if (new_literal_count > 0) {
- new_literal_count += JSFunction::kLiteralsPrefixSize;
- }
int old_literal_count = shared_info->num_literals();
if (old_literal_count == new_literal_count) {
@@ -1013,21 +1010,8 @@ class LiteralFixer {
CollectJSFunctions(shared_info, isolate);
for (int i = 0; i < function_instances->length(); i++) {
Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
- Handle<FixedArray> old_literals(fun->literals());
Handle<FixedArray> new_literals =
isolate->factory()->NewFixedArray(new_literal_count);
- if (new_literal_count > 0) {
- Handle<Context> native_context;
- if (old_literals->length() >
- JSFunction::kLiteralNativeContextIndex) {
- native_context = Handle<Context>(
- JSFunction::NativeContextFromLiterals(fun->literals()));
- } else {
- native_context = Handle<Context>(fun->context()->native_context());
- }
- new_literals->set(JSFunction::kLiteralNativeContextIndex,
- *native_context);
- }
fun->set_literals(*new_literals);
}
@@ -1075,7 +1059,7 @@ class LiteralFixer {
void visit(JSFunction* fun) {
FixedArray* literals = fun->literals();
int len = literals->length();
- for (int j = JSFunction::kLiteralsPrefixSize; j < len; j++) {
+ for (int j = 0; j < len; j++) {
literals->set_undefined(j);
}
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 9f2eed1d4a..1ae7b27f44 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -21,7 +21,6 @@
#include "src/macro-assembler.h"
#include "src/perf-jit.h"
#include "src/runtime-profiler.h"
-#include "src/serialize.h"
#include "src/string-stream.h"
#include "src/vm-state-inl.h"
@@ -876,27 +875,9 @@ void Logger::ApiEvent(const char* format, ...) {
}
-void Logger::ApiNamedSecurityCheck(Object* key) {
+void Logger::ApiSecurityCheck() {
if (!log_->IsEnabled() || !FLAG_log_api) return;
- if (key->IsString()) {
- SmartArrayPointer<char> str =
- String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,check-security,\"%s\"", str.get());
- } else if (key->IsSymbol()) {
- Symbol* symbol = Symbol::cast(key);
- if (symbol->name()->IsUndefined()) {
- ApiEvent("api,check-security,symbol(hash %x)", Symbol::cast(key)->Hash());
- } else {
- SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
- DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- ApiEvent("api,check-security,symbol(\"%s\" hash %x)", str.get(),
- Symbol::cast(key)->Hash());
- }
- } else if (key->IsUndefined()) {
- ApiEvent("api,check-security,undefined");
- } else {
- ApiEvent("api,check-security,['no-name']");
- }
+ ApiEvent("api,check-security");
}
@@ -911,9 +892,8 @@ void Logger::SharedLibraryEvent(const std::string& library_path,
}
-void Logger::CodeDeoptEvent(Code* code, int bailout_id, Address from,
- int fp_to_sp_delta) {
- PROFILER_LOG(CodeDeoptEvent(code, bailout_id, from, fp_to_sp_delta));
+void Logger::CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta) {
+ PROFILER_LOG(CodeDeoptEvent(code, pc, fp_to_sp_delta));
if (!log_->IsEnabled() || !FLAG_log_internal_timer_events) return;
Log::MessageBuilder msg(log_);
int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
@@ -1029,12 +1009,6 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
}
-void Logger::ApiIndexedSecurityCheck(uint32_t index) {
- if (!log_->IsEnabled() || !FLAG_log_api) return;
- ApiEvent("api,check-security,%u", index);
-}
-
-
void Logger::ApiNamedPropertyAccess(const char* tag,
JSObject* holder,
Object* name) {
@@ -1412,8 +1386,6 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
void Logger::SharedFunctionInfoMoveEvent(Address from, Address to) {
- PROFILER_LOG(SharedFunctionInfoMoveEvent(from, to));
-
if (!is_logging_code_events()) return;
MoveEventInternal(SHARED_FUNC_MOVE_EVENT, from, to);
}
@@ -1793,8 +1765,16 @@ static void AddIsolateIdIfNeeded(std::ostream& os, // NOLINT
static void PrepareLogFileName(std::ostream& os, // NOLINT
Isolate* isolate, const char* file_name) {
- AddIsolateIdIfNeeded(os, isolate);
+ int dir_separator_count = 0;
for (const char* p = file_name; *p; p++) {
+ if (base::OS::isDirectorySeparator(*p)) dir_separator_count++;
+ }
+
+ for (const char* p = file_name; *p; p++) {
+ if (dir_separator_count == 0) {
+ AddIsolateIdIfNeeded(os, isolate);
+ dir_separator_count--;
+ }
if (*p == '%') {
p++;
switch (*p) {
@@ -1820,6 +1800,7 @@ static void PrepareLogFileName(std::ostream& os, // NOLINT
break;
}
} else {
+ if (base::OS::isDirectorySeparator(*p)) dir_separator_count--;
os << *p;
}
}
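
Tracing the new separator handling on two hypothetical file names ("<id>"
stands for whatever text AddIsolateIdIfNeeded emits, and it is only emitted
when per-isolate log files are requested):

    "v8.log"       ->  "<id>v8.log"       (no separators: id prepended)
    "/tmp/v8.log"  ->  "/tmp/<id>v8.log"  (id lands right after the last
                                           directory separator, i.e. before
                                           the basename)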
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index bb7ff32e7d..c0559e7895 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -210,8 +210,7 @@ class Logger {
// ==== Events logged by --log-api. ====
- void ApiNamedSecurityCheck(Object* key);
- void ApiIndexedSecurityCheck(uint32_t index);
+ void ApiSecurityCheck();
void ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name);
void ApiIndexedPropertyAccess(const char* tag,
JSObject* holder,
@@ -292,8 +291,7 @@ class Logger {
uintptr_t start,
uintptr_t end);
- void CodeDeoptEvent(Code* code, int bailout_id, Address from,
- int fp_to_sp_delta);
+ void CodeDeoptEvent(Code* code, Address pc, int fp_to_sp_delta);
void CurrentTimeEvent();
void TimerEvent(StartEnd se, const char* name);
diff --git a/deps/v8/src/lookup-inl.h b/deps/v8/src/lookup-inl.h
index ffc02e7878..a801e3493e 100644
--- a/deps/v8/src/lookup-inl.h
+++ b/deps/v8/src/lookup-inl.h
@@ -31,17 +31,28 @@ JSReceiver* LookupIterator::NextHolder(Map* map) {
}
-LookupIterator::State LookupIterator::LookupInHolder(Map* map,
- JSReceiver* holder) {
+LookupIterator::State LookupIterator::LookupInHolder(Map* const map,
+ JSReceiver* const holder) {
STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
DisallowHeapAllocation no_gc;
+ if (interceptor_state_ == InterceptorState::kProcessNonMasking) {
+ return LookupNonMaskingInterceptorInHolder(map, holder);
+ }
switch (state_) {
case NOT_FOUND:
if (map->IsJSProxyMap()) return JSPROXY;
- if (map->is_access_check_needed()) return ACCESS_CHECK;
+ if (map->is_access_check_needed() &&
+ !isolate_->IsInternallyUsedPropertyName(name_)) {
+ return ACCESS_CHECK;
+ }
// Fall through.
case ACCESS_CHECK:
- if (check_interceptor() && map->has_named_interceptor()) {
+ if (exotic_index_state_ != ExoticIndexState::kNoIndex &&
+ IsIntegerIndexedExotic(holder)) {
+ return INTEGER_INDEXED_EXOTIC;
+ }
+ if (check_interceptor() && map->has_named_interceptor() &&
+ !SkipInterceptor(JSObject::cast(holder))) {
return INTERCEPTOR;
}
// Fall through.
@@ -50,12 +61,12 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* map,
NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
number_ = dict->FindEntry(name_);
if (number_ == NameDictionary::kNotFound) return NOT_FOUND;
- property_details_ = dict->DetailsAt(number_);
if (holder->IsGlobalObject()) {
- if (property_details_.IsDeleted()) return NOT_FOUND;
+ DCHECK(dict->ValueAt(number_)->IsPropertyCell());
PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
if (cell->value()->IsTheHole()) return NOT_FOUND;
}
+ property_details_ = dict->DetailsAt(number_);
} else {
DescriptorArray* descriptors = map->instance_descriptors();
number_ = descriptors->SearchWithCache(*name_, map);
@@ -72,6 +83,7 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* map,
case ACCESSOR:
case DATA:
return NOT_FOUND;
+ case INTEGER_INDEXED_EXOTIC:
case JSPROXY:
case TRANSITION:
UNREACHABLE();
@@ -79,6 +91,23 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* map,
UNREACHABLE();
return state_;
}
+
+
+LookupIterator::State LookupIterator::LookupNonMaskingInterceptorInHolder(
+ Map* const map, JSReceiver* const holder) {
+ switch (state_) {
+ case NOT_FOUND:
+ if (check_interceptor() && map->has_named_interceptor() &&
+ !SkipInterceptor(JSObject::cast(holder))) {
+ return INTERCEPTOR;
+ }
+ // Fall through.
+ default:
+ return NOT_FOUND;
+ }
+ UNREACHABLE();
+ return state_;
+}
}
} // namespace v8::internal
diff --git a/deps/v8/src/lookup.cc b/deps/v8/src/lookup.cc
index 672c026c3c..5a6c5da3dd 100644
--- a/deps/v8/src/lookup.cc
+++ b/deps/v8/src/lookup.cc
@@ -29,7 +29,13 @@ void LookupIterator::Next() {
// Continue lookup if lookup on current holder failed.
do {
JSReceiver* maybe_holder = NextHolder(map);
- if (maybe_holder == NULL) break;
+ if (maybe_holder == nullptr) {
+ if (interceptor_state_ == InterceptorState::kSkipNonMasking) {
+ RestartLookupForNonMaskingInterceptors();
+ return;
+ }
+ break;
+ }
holder = maybe_holder;
map = holder->map();
state_ = LookupInHolder(map, holder);
@@ -42,11 +48,25 @@ void LookupIterator::Next() {
}
-Handle<JSReceiver> LookupIterator::GetRoot() const {
- if (receiver_->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver_);
- Handle<Object> root =
- handle(receiver_->GetRootMap(isolate_)->prototype(), isolate_);
- CHECK(!root->IsNull());
+void LookupIterator::RestartLookupForNonMaskingInterceptors() {
+ interceptor_state_ = InterceptorState::kProcessNonMasking;
+ state_ = NOT_FOUND;
+ property_details_ = PropertyDetails::Empty();
+ number_ = DescriptorArray::kNotFound;
+ holder_ = initial_holder_;
+ holder_map_ = handle(holder_->map(), isolate_);
+ Next();
+}
+
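+// Schematic summary of the two-pass lookup: the first pass is a normal walk
+// that bypasses non-masking interceptors (SkipInterceptor(), not shown in
+// this patch); only when it falls off the end of the prototype chain in the
+// kSkipNonMasking state does Next() invoke the restart above, and the second
+// pass (kProcessNonMasking) then reports only INTERCEPTOR states via
+// LookupNonMaskingInterceptorInHolder() in lookup-inl.h.
+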
+
+Handle<JSReceiver> LookupIterator::GetRoot(Handle<Object> receiver,
+ Isolate* isolate) {
+ if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
+ auto root = handle(receiver->GetRootMap(isolate)->prototype(), isolate);
+ if (root->IsNull()) {
+ unsigned int magic = 0xbbbbbbbb;
+ isolate->PushStackTraceAndDie(magic, *receiver, NULL, magic);
+ }
return Handle<JSReceiver>::cast(root);
}
@@ -72,14 +92,15 @@ bool LookupIterator::IsBootstrapping() const {
}
-bool LookupIterator::HasAccess(v8::AccessType access_type) const {
+bool LookupIterator::HasAccess() const {
DCHECK_EQ(ACCESS_CHECK, state_);
- return isolate_->MayNamedAccess(GetHolder<JSObject>(), name_, access_type);
+ return isolate_->MayAccess(GetHolder<JSObject>());
}
void LookupIterator::ReloadPropertyInformation() {
state_ = BEFORE_PROPERTY;
+ interceptor_state_ = InterceptorState::kUninitialized;
state_ = LookupInHolder(*holder_map_, *holder_);
DCHECK(IsFound() || holder_map_->is_dictionary_map());
}
@@ -102,7 +123,8 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
DCHECK(HolderIsReceiverOrHiddenPrototype());
Handle<JSObject> holder = GetHolder<JSObject>();
if (holder_map_->is_dictionary_map()) {
- PropertyDetails details(attributes, v8::internal::DATA, 0);
+ PropertyDetails details(attributes, v8::internal::DATA, 0,
+ PropertyCellType::kMutable);
JSObject::SetNormalizedProperty(holder, name(), value, details);
} else {
holder_map_ = Map::ReconfigureExistingProperty(
@@ -120,9 +142,9 @@ void LookupIterator::PrepareTransitionToDataProperty(
Handle<Object> value, PropertyAttributes attributes,
Object::StoreFromKeyed store_mode) {
if (state_ == TRANSITION) return;
- DCHECK(state_ != LookupIterator::ACCESSOR);
+ DCHECK_NE(LookupIterator::ACCESSOR, state_);
+ DCHECK_NE(LookupIterator::INTEGER_INDEXED_EXOTIC, state_);
DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
- DCHECK(!IsSpecialNumericIndex());
// Can only be called when the receiver is a JSObject. JSProxy has to be
// handled via a trap. Adding properties to primitive values is not
// observable.
@@ -182,14 +204,10 @@ void LookupIterator::TransitionToAccessorProperty(
if (!holder_map_->is_dictionary_map()) return;
- // We have to deoptimize since accesses to data properties may have been
- // inlined without a corresponding map-check.
- if (holder_map_->IsGlobalObjectMap()) {
- Deoptimizer::DeoptimizeGlobalObject(*receiver);
- }
// Install the accessor into the dictionary-mode object.
- PropertyDetails details(attributes, ACCESSOR_CONSTANT, 0);
+ PropertyDetails details(attributes, ACCESSOR_CONSTANT, 0,
+ PropertyCellType::kMutable);
Handle<AccessorPair> pair;
if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
pair = Handle<AccessorPair>::cast(GetAccessors());
@@ -241,6 +259,7 @@ Handle<Object> LookupIterator::FetchValue() const {
if (holder_map_->is_dictionary_map()) {
result = holder->property_dictionary()->ValueAt(number_);
if (holder_map_->IsGlobalObjectMap()) {
+ DCHECK(result->IsPropertyCell());
result = PropertyCell::cast(result)->value();
}
} else if (property_details_.type() == v8::internal::DATA) {
@@ -295,7 +314,8 @@ Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
Handle<JSObject> holder = GetHolder<JSObject>();
Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
Object* value = global->property_dictionary()->ValueAt(dictionary_entry());
- return Handle<PropertyCell>(PropertyCell::cast(value));
+ DCHECK(value->IsPropertyCell());
+ return handle(PropertyCell::cast(value));
}
@@ -316,11 +336,11 @@ Handle<Object> LookupIterator::WriteDataValue(Handle<Object> value) {
DCHECK_EQ(DATA, state_);
Handle<JSObject> holder = GetHolder<JSObject>();
if (holder_map_->is_dictionary_map()) {
- NameDictionary* property_dictionary = holder->property_dictionary();
+ Handle<NameDictionary> property_dictionary =
+ handle(holder->property_dictionary());
if (holder->IsGlobalObject()) {
- Handle<PropertyCell> cell(
- PropertyCell::cast(property_dictionary->ValueAt(dictionary_entry())));
- value = PropertyCell::SetValueInferType(cell, value);
+ value = PropertyCell::UpdateCell(property_dictionary, dictionary_entry(),
+ value, property_details_);
} else {
property_dictionary->ValueAtPut(dictionary_entry(), *value);
}
@@ -333,25 +353,23 @@ Handle<Object> LookupIterator::WriteDataValue(Handle<Object> value) {
}
-bool LookupIterator::IsSpecialNumericIndex() const {
- if (GetStoreTarget()->IsJSTypedArray() && name()->IsString()) {
+bool LookupIterator::IsIntegerIndexedExotic(JSReceiver* holder) {
+ DCHECK(exotic_index_state_ != ExoticIndexState::kNoIndex);
+ // Currently typed arrays are the only such objects.
+ if (!holder->IsJSTypedArray()) return false;
+ if (exotic_index_state_ == ExoticIndexState::kIndex) return true;
+ DCHECK(exotic_index_state_ == ExoticIndexState::kUninitialized);
+ bool result = false;
+ // Compute and cache result.
+ if (name()->IsString()) {
Handle<String> name_string = Handle<String>::cast(name());
- if (name_string->length() > 0) {
- double d =
- StringToDouble(isolate()->unicode_cache(), name_string, NO_FLAGS);
- if (!std::isnan(d)) {
- if (String::Equals(isolate()->factory()->minus_zero_string(),
- name_string))
- return true;
-
- Factory* factory = isolate()->factory();
- Handle<Object> num = factory->NewNumber(d);
- Handle<String> roundtrip_string = factory->NumberToString(num);
- if (String::Equals(name_string, roundtrip_string)) return true;
- }
+ if (name_string->length() != 0) {
+ result = IsNonArrayIndexInteger(*name_string);
}
}
- return false;
+ exotic_index_state_ =
+ result ? ExoticIndexState::kIndex : ExoticIndexState::kNoIndex;
+ return result;
}
@@ -359,4 +377,22 @@ void LookupIterator::InternalizeName() {
if (name_->IsUniqueName()) return;
name_ = factory()->InternalizeString(Handle<String>::cast(name_));
}
+
+
+bool LookupIterator::SkipInterceptor(JSObject* holder) {
+ auto info = holder->GetNamedInterceptor();
+ // TODO(dcarney): check for symbol/can_intercept_symbols here as well.
+ if (info->non_masking()) {
+ switch (interceptor_state_) {
+ case InterceptorState::kUninitialized:
+ interceptor_state_ = InterceptorState::kSkipNonMasking;
+ // Fall through.
+ case InterceptorState::kSkipNonMasking:
+ return true;
+ case InterceptorState::kProcessNonMasking:
+ return false;
+ }
+ }
+ return interceptor_state_ == InterceptorState::kProcessNonMasking;
+}
} } // namespace v8::internal
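For readers tracing the new non-masking interceptor flow: SkipInterceptor and RestartLookupForNonMaskingInterceptors together implement a small three-state machine. A minimal standalone C++ sketch of the transition logic (the driver and everything outside the enum are simplifications for illustration, not V8 API):

    #include <cassert>

    // Mirrors LookupIterator::InterceptorState introduced by this patch.
    enum class InterceptorState {
      kUninitialized,
      kSkipNonMasking,
      kProcessNonMasking
    };

    // First pass: a non-masking interceptor is skipped so real properties
    // win, and the state records that a skip happened. After the lookup is
    // restarted with kProcessNonMasking, the interceptor is consulted.
    bool SkipInterceptor(InterceptorState* state, bool non_masking) {
      if (non_masking) {
        switch (*state) {
          case InterceptorState::kUninitialized:
            *state = InterceptorState::kSkipNonMasking;
            // Fall through.
          case InterceptorState::kSkipNonMasking:
            return true;
          case InterceptorState::kProcessNonMasking:
            return false;
        }
      }
      return *state == InterceptorState::kProcessNonMasking;
    }

    int main() {
      InterceptorState s = InterceptorState::kUninitialized;
      assert(SkipInterceptor(&s, true));          // first pass: skipped
      s = InterceptorState::kProcessNonMasking;   // simulated restart
      assert(!SkipInterceptor(&s, true));         // second pass: processed
    }
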
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 5a5466ebe4..f658b13829 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -31,6 +31,7 @@ class LookupIterator FINAL BASE_EMBEDDED {
enum State {
ACCESS_CHECK,
+ INTEGER_INDEXED_EXOTIC,
INTERCEPTOR,
JSPROXY,
NOT_FOUND,
@@ -46,13 +47,16 @@ class LookupIterator FINAL BASE_EMBEDDED {
Configuration configuration = PROTOTYPE_CHAIN)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
- property_details_(NONE, v8::internal::DATA, 0),
+ exotic_index_state_(ExoticIndexState::kUninitialized),
+ interceptor_state_(InterceptorState::kUninitialized),
+ property_details_(PropertyDetails::Empty()),
isolate_(name->GetIsolate()),
name_(name),
receiver_(receiver),
+ holder_(GetRoot(receiver_, isolate_)),
+ holder_map_(holder_->map(), isolate_),
+ initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
- holder_ = GetRoot();
- holder_map_ = handle(holder_->map(), isolate_);
Next();
}
@@ -61,12 +65,15 @@ class LookupIterator FINAL BASE_EMBEDDED {
Configuration configuration = PROTOTYPE_CHAIN)
: configuration_(ComputeConfiguration(configuration, name)),
state_(NOT_FOUND),
- property_details_(NONE, v8::internal::DATA, 0),
+ exotic_index_state_(ExoticIndexState::kUninitialized),
+ interceptor_state_(InterceptorState::kUninitialized),
+ property_details_(PropertyDetails::Empty()),
isolate_(name->GetIsolate()),
name_(name),
- holder_map_(holder->map(), isolate_),
receiver_(receiver),
holder_(holder),
+ holder_map_(holder_->map(), isolate_),
+ initial_holder_(holder_),
number_(DescriptorArray::kNotFound) {
Next();
}
@@ -95,11 +102,11 @@ class LookupIterator FINAL BASE_EMBEDDED {
DCHECK(IsFound());
return Handle<T>::cast(holder_);
}
- Handle<JSReceiver> GetRoot() const;
+ static Handle<JSReceiver> GetRoot(Handle<Object> receiver, Isolate* isolate);
bool HolderIsReceiverOrHiddenPrototype() const;
/* ACCESS_CHECK */
- bool HasAccess(v8::AccessType access_type) const;
+ bool HasAccess() const;
/* PROPERTY */
void PrepareForDataProperty(Handle<Object> value);
@@ -131,29 +138,29 @@ class LookupIterator FINAL BASE_EMBEDDED {
int GetAccessorIndex() const;
int GetConstantIndex() const;
Handle<PropertyCell> GetPropertyCell() const;
- Handle<PropertyCell> GetTransitionPropertyCell() const {
- DCHECK_EQ(TRANSITION, state_);
- return Handle<PropertyCell>::cast(transition_);
- }
Handle<Object> GetAccessors() const;
Handle<Object> GetDataValue() const;
// Usually returns the value that was passed in, but may perform
// non-observable modifications on it, such as internalize strings.
Handle<Object> WriteDataValue(Handle<Object> value);
-
- // Checks whether the receiver is an indexed exotic object
- // and name is a special numeric index.
- bool IsSpecialNumericIndex() const;
-
void InternalizeName();
private:
+ enum class InterceptorState {
+ kUninitialized,
+ kSkipNonMasking,
+ kProcessNonMasking
+ };
+
Handle<Map> GetReceiverMap() const;
MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
inline State LookupInHolder(Map* map, JSReceiver* holder);
+ void RestartLookupForNonMaskingInterceptors();
+ State LookupNonMaskingInterceptorInHolder(Map* map, JSReceiver* holder);
Handle<Object> FetchValue() const;
void ReloadPropertyInformation();
+ bool SkipInterceptor(JSObject* holder);
bool IsBootstrapping() const;
@@ -185,19 +192,24 @@ class LookupIterator FINAL BASE_EMBEDDED {
}
}
+ enum class ExoticIndexState { kUninitialized, kNoIndex, kIndex };
+ bool IsIntegerIndexedExotic(JSReceiver* holder);
+
// If configuration_ becomes mutable, update
// HolderIsReceiverOrHiddenPrototype.
- Configuration configuration_;
+ const Configuration configuration_;
State state_;
bool has_property_;
+ ExoticIndexState exotic_index_state_;
+ InterceptorState interceptor_state_;
PropertyDetails property_details_;
- Isolate* isolate_;
+ Isolate* const isolate_;
Handle<Name> name_;
- Handle<Map> holder_map_;
Handle<Object> transition_;
- Handle<Object> receiver_;
+ const Handle<Object> receiver_;
Handle<JSReceiver> holder_;
-
+ Handle<Map> holder_map_;
+ const Handle<JSReceiver> initial_holder_;
int number_;
};
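The ExoticIndexState cache above memoizes the answer to "is this name a canonical numeric key on a typed array?". The deleted IsSpecialNumericIndex answered that by parsing the string and comparing against its canonical re-serialization. A rough standalone C++ approximation of that round-trip test (V8's real StringToDouble/NumberToString handle more cases than strtod/snprintf do):

    #include <cmath>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    // Round-trip test in the spirit of the removed IsSpecialNumericIndex():
    // a string is a canonical numeric key if parsing it and printing the
    // result reproduces the original string; "-0" is special-cased, as the
    // old code's minus_zero_string check was.
    bool IsCanonicalNumericKey(const std::string& name) {
      if (name.empty()) return false;
      if (name == "-0") return true;
      char* end = nullptr;
      double d = std::strtod(name.c_str(), &end);
      if (end != name.c_str() + name.size() || std::isnan(d)) return false;
      char buf[32];
      std::snprintf(buf, sizeof(buf), "%.17g", d);
      return name == buf;  // approximation of the NumberToString round-trip
    }

    int main() {
      std::printf("%d %d %d\n",
                  IsCanonicalNumericKey("10"),    // 1: canonical
                  IsCanonicalNumericKey("010"),   // 0: canonical form is "10"
                  IsCanonicalNumericKey("1e2"));  // 0: canonical form is "100"
    }
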
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 93a5563c19..324702bb3a 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -69,6 +69,11 @@ const kMaxYear = 1000000;
const kMinMonth = -10000000;
const kMaxMonth = 10000000;
+# Safe maximum number of arguments to push to the stack when multiplied by
+# the pointer size. Used by Function.prototype.apply(), Reflect.apply() and
+# Reflect.construct().
+const kSafeArgumentsLength = 0x800000;
+
# Strict mode flags for passing to %SetProperty
const kSloppyMode = 0;
const kStrictMode = 1;
@@ -111,7 +116,6 @@ macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
macro IS_UNDETECTABLE(arg) = (%_IsUndetectableObject(arg));
-macro FLOOR(arg) = $floor(arg);
# Macro for ECMAScript 5 queries of the type:
# "Type(O) is object."
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index cc478d3448..b802de0f46 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -2,58 +2,59 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-"use strict";
+var rngstate; // Initialized to a Uint32Array during genesis.
-// This file relies on the fact that the following declarations have been made
-// in runtime.js:
-// var $Object = global.Object;
+var $abs;
+var $exp;
+var $floor;
+var $max;
+var $min;
-// Keep reference to original values of some global properties. This
-// has the added benefit that the code in this file is isolated from
-// changes to these properties.
-var $floor = MathFloor;
-var $abs = MathAbs;
+(function() {
-// Instance class name can only be set on functions. That is the only
-// purpose for MathConstructor.
-function MathConstructor() {}
-var $Math = new MathConstructor();
+"use strict";
-// -------------------------------------------------------------------
+%CheckIsBootstrapping();
+
+var GlobalObject = global.Object;
+var GlobalArray = global.Array;
+
+//-------------------------------------------------------------------
// ECMA 262 - 15.8.2.1
function MathAbs(x) {
- if (%_IsSmi(x)) return x >= 0 ? x : -x;
- x = TO_NUMBER_INLINE(x);
- if (x === 0) return 0; // To handle -0.
- return x > 0 ? x : -x;
+ x = +x;
+ if (x > 0) return x;
+ return 0 - x;
}
// ECMA 262 - 15.8.2.2
function MathAcosJS(x) {
- return %MathAcos(TO_NUMBER_INLINE(x));
+ return %_MathAcos(+x);
}
// ECMA 262 - 15.8.2.3
function MathAsinJS(x) {
- return %MathAsin(TO_NUMBER_INLINE(x));
+ return %_MathAsin(+x);
}
// ECMA 262 - 15.8.2.4
function MathAtanJS(x) {
- return %MathAtan(TO_NUMBER_INLINE(x));
+ return %_MathAtan(+x);
}
// ECMA 262 - 15.8.2.5
// The naming of y and x matches the spec, as does the order in which
// ToNumber (valueOf) is called.
function MathAtan2JS(y, x) {
- return %MathAtan2(TO_NUMBER_INLINE(y), TO_NUMBER_INLINE(x));
+ y = +y;
+ x = +x;
+ return %_MathAtan2(y, x);
}
// ECMA 262 - 15.8.2.6
function MathCeil(x) {
- return -MathFloor(-x);
+ return -%_MathFloor(-x);
}
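The rewritten MathAbs and MathCeil lean on IEEE-754 signed-zero arithmetic: `0 - x` maps both +0 and -0 to +0, and `-floor(-x)` yields ceil with the spec's signed-zero results. A quick C++ check of both identities on doubles:

    #include <cmath>
    #include <cstdio>

    int main() {
      // abs via 0 - x: for x = -0.0 the x > 0 branch fails, and
      // 0.0 - (-0.0) produces +0.0, exactly as Math.abs requires.
      double minus_zero = -0.0;
      double abs_mz = (minus_zero > 0) ? minus_zero : 0.0 - minus_zero;
      std::printf("abs(-0) signbit: %d\n", std::signbit(abs_mz));  // 0

      // ceil via -floor(-x): ceil(-0.5) = -floor(0.5) = -0.0,
      // preserving the negative zero the spec mandates.
      double x = -0.5;
      double c = -std::floor(-x);
      std::printf("ceil(-0.5) = %g, signbit: %d\n", c, std::signbit(c));
    }
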
// ECMA 262 - 15.8.2.8
@@ -62,19 +63,8 @@ function MathExp(x) {
}
// ECMA 262 - 15.8.2.9
-function MathFloor(x) {
- x = TO_NUMBER_INLINE(x);
- // It's more common to call this with a positive number that's out
- // of range than negative numbers; check the upper bound first.
- if (x < 0x80000000 && x > 0) {
- // Numbers in the range [0, 2^31) can be floored by converting
- // them to an unsigned 32-bit value using the shift operator.
- // We avoid doing so for -0, because the result of Math.floor(-0)
- // has to be -0, which wouldn't be the case with the shift.
- return TO_UINT32(x);
- } else {
- return %MathFloorRT(x);
- }
+function MathFloorJS(x) {
+ return %_MathFloor(+x);
}
// ECMA 262 - 15.8.2.10
@@ -137,12 +127,11 @@ function MathMin(arg1, arg2) { // length == 2
}
// ECMA 262 - 15.8.2.13
-function MathPow(x, y) {
+function MathPowJS(x, y) {
return %_MathPow(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
}
// ECMA 262 - 15.8.2.14
-var rngstate; // Initialized to a Uint32Array during genesis.
function MathRandom() {
var r0 = (MathImul(18030, rngstate[0] & 0xFFFF) + (rngstate[0] >>> 16)) | 0;
rngstate[0] = r0;
@@ -159,8 +148,8 @@ function MathRound(x) {
}
// ECMA 262 - 15.8.2.17
-function MathSqrt(x) {
- return %_MathSqrtRT(TO_NUMBER_INLINE(x));
+function MathSqrtJS(x) {
+ return %_MathSqrt(+x);
}
// Non-standard extension.
@@ -170,7 +159,7 @@ function MathImul(x, y) {
// ES6 draft 09-27-13, section 20.2.2.28.
function MathSign(x) {
- x = TO_NUMBER_INLINE(x);
+ x = +x;
if (x > 0) return 1;
if (x < 0) return -1;
// -0, 0 or NaN.
@@ -179,9 +168,9 @@ function MathSign(x) {
// ES6 draft 09-27-13, section 20.2.2.34.
function MathTrunc(x) {
- x = TO_NUMBER_INLINE(x);
- if (x > 0) return MathFloor(x);
- if (x < 0) return MathCeil(x);
+ x = +x;
+ if (x > 0) return %_MathFloor(x);
+ if (x < 0) return -%_MathFloor(-x);
// -0, 0 or NaN.
return x;
}
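MathTrunc's rewrite uses the identities trunc(x) = floor(x) for x > 0 and trunc(x) = -floor(-x) for x < 0, returning the input unchanged for -0, +0 and NaN, which the arithmetic alone would not preserve. In C++:

    #include <cmath>
    #include <cstdio>

    double Trunc(double x) {
      if (x > 0) return std::floor(x);
      if (x < 0) return -std::floor(-x);
      return x;  // -0, 0 or NaN: returned unchanged, as in the JS version
    }

    int main() {
      std::printf("%g %g %d\n", Trunc(2.9), Trunc(-2.9),
                  std::signbit(Trunc(-0.0)));  // 2 -2 1
    }
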
@@ -203,9 +192,9 @@ function MathAsinh(x) {
if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
// Idempotent for NaN, +/-0 and +/-Infinity.
if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
- if (x > 0) return MathLog(x + MathSqrt(x * x + 1));
+ if (x > 0) return MathLog(x + %_MathSqrt(x * x + 1));
// This is to prevent numerical errors caused by large negative x.
- return -MathLog(-x + MathSqrt(x * x + 1));
+ return -MathLog(-x + %_MathSqrt(x * x + 1));
}
// ES6 draft 09-27-13, section 20.2.2.3.
@@ -214,7 +203,7 @@ function MathAcosh(x) {
if (x < 1) return NAN;
// Idempotent for NaN and +Infinity.
if (!NUMBER_IS_FINITE(x)) return x;
- return MathLog(x + MathSqrt(x + 1) * MathSqrt(x - 1));
+ return MathLog(x + %_MathSqrt(x + 1) * %_MathSqrt(x - 1));
}
// ES6 draft 09-27-13, section 20.2.2.7.
@@ -256,7 +245,7 @@ function MathHypot(x, y) { // Function length is 2.
compensation = (preliminary - sum) - summand;
sum = preliminary;
}
- return MathSqrt(sum) * max;
+ return %_MathSqrt(sum) * max;
}
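MathHypot's loop (only its tail is visible in this hunk) scales every term by the largest magnitude so the squares cannot overflow, and accumulates them with a Kahan-style compensation term before the final sqrt(sum) * max. A self-contained C++ rendering of that scheme (a sketch of the algorithm, not a line-for-line port):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    double Hypot(const std::vector<double>& xs) {
      double max = 0;
      for (double x : xs) max = std::fmax(max, std::fabs(x));
      if (max == 0) return 0;
      // Kahan-style compensated sum of the scaled squares, as in MathHypot.
      double sum = 0, compensation = 0;
      for (double x : xs) {
        double n = std::fabs(x) / max;  // scaled so n * n cannot overflow
        double summand = n * n - compensation;
        double preliminary = sum + summand;
        compensation = (preliminary - sum) - summand;
        sum = preliminary;
      }
      return std::sqrt(sum) * max;
    }

    int main() {
      // 3-4-5 triangle, then inputs that would overflow a naive x*x + y*y.
      std::printf("%g\n", Hypot({3, 4}));          // 5
      std::printf("%g\n", Hypot({3e200, 4e200}));  // 5e+200
    }
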
// ES6 draft 09-27-13, section 20.2.2.16.
@@ -265,17 +254,8 @@ function MathFroundJS(x) {
}
// ES6 draft 07-18-14, section 20.2.2.11
-function MathClz32(x) {
- x = ToUint32(TO_NUMBER_INLINE(x));
- if (x == 0) return 32;
- var result = 0;
- // Binary search.
- if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
- if ((x & 0xFF000000) === 0) { x <<= 8; result += 8; };
- if ((x & 0xF0000000) === 0) { x <<= 4; result += 4; };
- if ((x & 0xC0000000) === 0) { x <<= 2; result += 2; };
- if ((x & 0x80000000) === 0) { x <<= 1; result += 1; };
- return result;
+function MathClz32JS(x) {
+ return %_MathClz32(x >>> 0);
}
// ES6 draft 09-27-13, section 20.2.2.9.
@@ -293,7 +273,7 @@ macro NEWTON_ITERATION_CBRT(x, approx)
endmacro
function CubeRoot(x) {
- var approx_hi = MathFloor(%_DoubleHi(x) / 3) + 0x2A9F7893;
+ var approx_hi = MathFloorJS(%_DoubleHi(x) / 3) + 0x2A9F7893;
var approx = %_ConstructDouble(approx_hi, 0);
approx = NEWTON_ITERATION_CBRT(x, approx);
approx = NEWTON_ITERATION_CBRT(x, approx);
@@ -303,75 +283,83 @@ function CubeRoot(x) {
// -------------------------------------------------------------------
-function SetUpMath() {
- %CheckIsBootstrapping();
-
- %InternalSetPrototype($Math, $Object.prototype);
- %AddNamedProperty(global, "Math", $Math, DONT_ENUM);
- %FunctionSetInstanceClassName(MathConstructor, 'Math');
-
- %AddNamedProperty($Math, symbolToStringTag, "Math", READ_ONLY | DONT_ENUM);
-
- // Set up math constants.
- InstallConstants($Math, $Array(
- // ECMA-262, section 15.8.1.1.
- "E", 2.7182818284590452354,
- // ECMA-262, section 15.8.1.2.
- "LN10", 2.302585092994046,
- // ECMA-262, section 15.8.1.3.
- "LN2", 0.6931471805599453,
- // ECMA-262, section 15.8.1.4.
- "LOG2E", 1.4426950408889634,
- "LOG10E", 0.4342944819032518,
- "PI", 3.1415926535897932,
- "SQRT1_2", 0.7071067811865476,
- "SQRT2", 1.4142135623730951
- ));
-
- // Set up non-enumerable functions of the Math object and
- // set their names.
- InstallFunctions($Math, DONT_ENUM, $Array(
- "random", MathRandom,
- "abs", MathAbs,
- "acos", MathAcosJS,
- "asin", MathAsinJS,
- "atan", MathAtanJS,
- "ceil", MathCeil,
- "cos", MathCos, // implemented by third_party/fdlibm
- "exp", MathExp,
- "floor", MathFloor,
- "log", MathLog,
- "round", MathRound,
- "sin", MathSin, // implemented by third_party/fdlibm
- "sqrt", MathSqrt,
- "tan", MathTan, // implemented by third_party/fdlibm
- "atan2", MathAtan2JS,
- "pow", MathPow,
- "max", MathMax,
- "min", MathMin,
- "imul", MathImul,
- "sign", MathSign,
- "trunc", MathTrunc,
- "sinh", MathSinh, // implemented by third_party/fdlibm
- "cosh", MathCosh, // implemented by third_party/fdlibm
- "tanh", MathTanh,
- "asinh", MathAsinh,
- "acosh", MathAcosh,
- "atanh", MathAtanh,
- "log10", MathLog10, // implemented by third_party/fdlibm
- "log2", MathLog2, // implemented by third_party/fdlibm
- "hypot", MathHypot,
- "fround", MathFroundJS,
- "clz32", MathClz32,
- "cbrt", MathCbrt,
- "log1p", MathLog1p, // implemented by third_party/fdlibm
- "expm1", MathExpm1 // implemented by third_party/fdlibm
- ));
-
- %SetInlineBuiltinFlag(MathCeil);
- %SetInlineBuiltinFlag(MathRandom);
- %SetInlineBuiltinFlag(MathSin);
- %SetInlineBuiltinFlag(MathCos);
-}
+// Instance class name can only be set on functions. That is the only
+// purpose for MathConstructor.
+function MathConstructor() {}
-SetUpMath();
+var Math = new MathConstructor();
+
+%InternalSetPrototype(Math, GlobalObject.prototype);
+%AddNamedProperty(global, "Math", Math, DONT_ENUM);
+%FunctionSetInstanceClassName(MathConstructor, 'Math');
+
+%AddNamedProperty(Math, symbolToStringTag, "Math", READ_ONLY | DONT_ENUM);
+
+// Set up math constants.
+InstallConstants(Math, GlobalArray(
+ // ECMA-262, section 15.8.1.1.
+ "E", 2.7182818284590452354,
+ // ECMA-262, section 15.8.1.2.
+ "LN10", 2.302585092994046,
+ // ECMA-262, section 15.8.1.3.
+ "LN2", 0.6931471805599453,
+ // ECMA-262, section 15.8.1.4.
+ "LOG2E", 1.4426950408889634,
+ "LOG10E", 0.4342944819032518,
+ "PI", 3.1415926535897932,
+ "SQRT1_2", 0.7071067811865476,
+ "SQRT2", 1.4142135623730951
+));
+
+// Set up non-enumerable functions of the Math object and
+// set their names.
+InstallFunctions(Math, DONT_ENUM, GlobalArray(
+ "random", MathRandom,
+ "abs", MathAbs,
+ "acos", MathAcosJS,
+ "asin", MathAsinJS,
+ "atan", MathAtanJS,
+ "ceil", MathCeil,
+ "exp", MathExp,
+ "floor", MathFloorJS,
+ "log", MathLog,
+ "round", MathRound,
+ "sqrt", MathSqrtJS,
+ "atan2", MathAtan2JS,
+ "pow", MathPowJS,
+ "max", MathMax,
+ "min", MathMin,
+ "imul", MathImul,
+ "sign", MathSign,
+ "trunc", MathTrunc,
+ "tanh", MathTanh,
+ "asinh", MathAsinh,
+ "acosh", MathAcosh,
+ "atanh", MathAtanh,
+ "hypot", MathHypot,
+ "fround", MathFroundJS,
+ "clz32", MathClz32JS,
+ "cbrt", MathCbrt
+));
+
+%SetInlineBuiltinFlag(MathAbs);
+%SetInlineBuiltinFlag(MathAcosJS);
+%SetInlineBuiltinFlag(MathAsinJS);
+%SetInlineBuiltinFlag(MathAtanJS);
+%SetInlineBuiltinFlag(MathAtan2JS);
+%SetInlineBuiltinFlag(MathCeil);
+%SetInlineBuiltinFlag(MathClz32JS);
+%SetInlineBuiltinFlag(MathFloorJS);
+%SetInlineBuiltinFlag(MathRandom);
+%SetInlineBuiltinFlag(MathSign);
+%SetInlineBuiltinFlag(MathSqrtJS);
+%SetInlineBuiltinFlag(MathTrunc);
+
+// Expose to the global scope.
+$abs = MathAbs;
+$exp = MathExp;
+$floor = MathFloorJS;
+$max = MathMax;
+$min = MathMin;
+
+})();
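One numeric detail from the CubeRoot hunk earlier in this file: it seeds Newton's method from the double's high word (hi/3 plus the magic constant 0x2A9F7893) and then runs a fixed number of NEWTON_ITERATION_CBRT steps. The macro body is elided from the hunk; the update below is the textbook Newton step for f(a) = a^3 - x, which is what such an iteration computes, seeded trivially instead of with the bit trick (an illustration of the iteration, not V8's exact form):

    #include <cstdio>

    // Newton update for f(a) = a^3 - x:  a' = (x / a^2 + 2a) / 3.
    double CubeRootNewton(double x, double approx, int iterations) {
      for (int i = 0; i < iterations; ++i) {
        approx = (x / (approx * approx) + 2 * approx) / 3.0;
      }
      return approx;
    }

    int main() {
      // A crude seed converges in a handful of iterations; V8's high-word
      // seed exists so that very few iterations are needed.
      std::printf("%.15g\n", CubeRootNewton(27.0, 1.0, 30));  // 3
    }
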
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index b49556c6a1..95110bb95e 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -8,7 +8,8 @@ var kMessages = {
// Error
cyclic_proto: ["Cyclic __proto__ value"],
code_gen_from_strings: ["%0"],
- constructor_special_method: ["Class constructor may not be an accessor"],
+ constructor_is_generator: ["Class constructor may not be a generator"],
+ constructor_is_accessor: ["Class constructor may not be an accessor"],
// TypeError
generator_running: ["Generator is already running"],
unexpected_token: ["Unexpected token ", "%0"],
@@ -24,6 +25,7 @@ var kMessages = {
unterminated_regexp: ["Invalid regular expression: missing /"],
unterminated_template: ["Unterminated template literal"],
unterminated_template_expr: ["Missing } in template expression"],
+ unterminated_arg_list: ["missing ) after argument list"],
regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
multiple_defaults_in_switch: ["More than one default clause in switch statement"],
@@ -50,6 +52,8 @@ var kMessages = {
no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
+ reflect_apply_wrong_args: ["Reflect.apply: Arguments list has wrong type"],
+ reflect_construct_wrong_args: ["Reflect.construct: Arguments list has wrong type"],
flags_getter_non_object: ["RegExp.prototype.flags getter called on non-object ", "%0"],
invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
@@ -80,7 +84,7 @@ var kMessages = {
observe_non_object: ["Object.", "%0", " cannot ", "%0", " non-object"],
observe_non_function: ["Object.", "%0", " cannot deliver to non-function"],
observe_callback_frozen: ["Object.observe cannot deliver to a frozen function object"],
- observe_invalid_accept: ["Object.observe accept must be an array of strings."],
+ observe_invalid_accept: ["Third argument to Object.observe must be an array of strings."],
observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
observe_perform_non_string: ["Invalid non-string changeType"],
observe_perform_non_function: ["Cannot perform non-function"],
@@ -155,29 +159,36 @@ var kMessages = {
template_octal_literal: ["Octal literals are not allowed in template strings."],
strict_delete: ["Delete of an unqualified identifier in strict mode."],
strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
- strict_const: ["Use of const in strict mode."],
strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
strict_caller: ["Illegal access to a strict mode caller function."],
- strong_arguments: ["Please don't use 'arguments' in strong mode, use '...args' instead"],
- strong_equal: ["Please don't use '==' or '!=' in strong mode, use '===' or '!==' instead"],
- strong_delete: ["Please don't use 'delete' in strong mode, use maps or sets instead"],
- strong_var: ["Please don't use 'var' in strong mode, use 'let' or 'const' instead"],
- strong_for_in: ["Please don't use 'for'-'in' loops in strong mode, use 'for'-'of' instead"],
- strong_empty: ["Please don't use empty sub-statements in strong mode, make them explicit with '{}' instead"],
+ strong_ellision: ["In strong mode, arrays with holes are deprecated, use maps instead"],
+ strong_arguments: ["In strong mode, 'arguments' is deprecated, use '...args' instead"],
+ strong_equal: ["In strong mode, '==' and '!=' are deprecated, use '===' and '!==' instead"],
+ strong_delete: ["In strong mode, 'delete' is deprecated, use maps or sets instead"],
+ strong_var: ["In strong mode, 'var' is deprecated, use 'let' or 'const' instead"],
+ strong_for_in: ["In strong mode, 'for'-'in' loops are deprecated, use 'for'-'of' instead"],
+ strong_empty: ["In strong mode, empty sub-statements are deprecated, make them explicit with '{}' instead"],
+ strong_use_before_declaration: ["In strong mode, declaring variable '", "%0", "' before its use is required"],
+ strong_unbound_global: ["In strong mode, using an undeclared global variable '", "%0", "' is not allowed"],
+ strong_super_call_missing: ["In strong mode, invoking the super constructor in a subclass is required"],
+ strong_super_call_duplicate: ["In strong mode, invoking the super constructor multiple times is deprecated"],
+ strong_super_call_nested: ["In strong mode, invoking the super constructor nested inside another statement or expression is deprecated"],
+ strong_constructor_return_value: ["In strong mode, returning a value from a constructor is deprecated"],
+ strong_constructor_return_misplaced: ["In strong mode, returning from a constructor before its super constructor invocation is deprecated"],
sloppy_lexical: ["Block-scoped declarations (let, const, function, class) not yet supported outside strict mode"],
malformed_arrow_function_parameter_list: ["Malformed arrow function parameter list"],
generator_poison_pill: ["'caller' and 'arguments' properties may not be accessed on generator functions."],
cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
- harmony_const_assign: ["Assignment to constant variable."],
+ const_assign: ["Assignment to constant variable."],
symbol_to_string: ["Cannot convert a Symbol value to a string"],
symbol_to_primitive: ["Cannot convert a Symbol wrapper object to a primitive value"],
symbol_to_number: ["Cannot convert a Symbol value to a number"],
- invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
module_export_undefined: ["Export '", "%0", "' is not defined in module"],
+ duplicate_export: ["Duplicate export of '", "%0", "'"],
unexpected_super: ["'super' keyword unexpected here"],
extends_value_not_a_function: ["Class extends value ", "%0", " is not a function or null"],
prototype_parent_not_an_object: ["Class extends value does not have valid prototype property ", "%0"],
@@ -232,7 +243,7 @@ function NoSideEffectToString(obj) {
}
return str;
}
- if (IS_SYMBOL(obj)) return %_CallFunction(obj, SymbolToString);
+ if (IS_SYMBOL(obj)) return %_CallFunction(obj, $symbolToString);
if (IS_OBJECT(obj)
&& %GetDataProperty(obj, "toString") === DefaultObjectToString) {
var constructor = %GetDataProperty(obj, "constructor");
@@ -342,7 +353,6 @@ function GetSourceLine(message) {
var start_position = %MessageGetStartPosition(message);
var location = script.locationFromPosition(start_position, true);
if (location == null) return "";
- location.restrict();
return location.sourceText();
}
@@ -439,7 +449,7 @@ function ScriptLocationFromPosition(position,
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- if (end > 0 && %_CallFunction(this.source, end - 1, StringCharAt) == '\r') {
+ if (end > 0 && %_CallFunction(this.source, end - 1, $stringCharAt) == '\r') {
end--;
}
var column = position - start;
@@ -562,7 +572,7 @@ function ScriptSourceLine(opt_line) {
var line_ends = this.line_ends;
var start = line == 0 ? 0 : line_ends[line - 1] + 1;
var end = line_ends[line];
- return %_CallFunction(this.source, start, end, StringSubstring);
+ return %_CallFunction(this.source, start, end, $stringSubstring);
}
@@ -643,57 +653,6 @@ function SourceLocation(script, position, line, column, start, end) {
this.end = end;
}
-var kLineLengthLimit = 78;
-
-/**
- * Restrict source location start and end positions to make the source slice
- * no more that a certain number of characters wide.
- * @param {number} opt_limit The width limit of the source text with a default
- * of 78
- * @param {number} opt_before The number of characters to prefer before the
- * position with a default value of 10 less than the limit
- */
-function SourceLocationRestrict(opt_limit, opt_before) {
- // Find the actual limit to use.
- var limit;
- var before;
- if (!IS_UNDEFINED(opt_limit)) {
- limit = opt_limit;
- } else {
- limit = kLineLengthLimit;
- }
- if (!IS_UNDEFINED(opt_before)) {
- before = opt_before;
- } else {
-    // If no before is specified, center for small limits and prefer more
-    // source before the position than after for longer limits.
- if (limit <= 20) {
- before = $floor(limit / 2);
- } else {
- before = limit - 10;
- }
- }
- if (before >= limit) {
- before = limit - 1;
- }
-
- // If the [start, end[ interval is too big we restrict
- // it in one or both ends. We make sure to always produce
- // restricted intervals of maximum allowed size.
- if (this.end - this.start > limit) {
- var start_limit = this.position - before;
- var end_limit = this.position + limit - before;
- if (this.start < start_limit && end_limit < this.end) {
- this.start = start_limit;
- this.end = end_limit;
- } else if (this.start < start_limit) {
- this.start = this.end - limit;
- } else {
- this.end = this.start + limit;
- }
- }
-}
-
/**
* Get the source text for a SourceLocation
@@ -704,14 +663,13 @@ function SourceLocationSourceText() {
return %_CallFunction(this.script.source,
this.start,
this.end,
- StringSubstring);
+ $stringSubstring);
}
SetUpLockedPrototype(SourceLocation,
$Array("script", "position", "line", "column", "start", "end"),
$Array(
- "restrict", SourceLocationRestrict,
"sourceText", SourceLocationSourceText
)
);
@@ -752,7 +710,7 @@ function SourceSliceSourceText() {
return %_CallFunction(this.script.source,
this.from_position,
this.to_position,
- StringSubstring);
+ $stringSubstring);
}
SetUpLockedPrototype(SourceSlice,
@@ -768,7 +726,6 @@ function GetPositionInLine(message) {
var start_position = %MessageGetStartPosition(message);
var location = script.locationFromPosition(start_position, false);
if (location == null) return -1;
- location.restrict();
return start_position - location.start;
}
@@ -830,16 +787,13 @@ function CallSiteGetFunction() {
function CallSiteGetFunctionName() {
// See if the function knows its own name
- var name = GET_PRIVATE(this, CallSiteFunctionKey).name;
- if (name) {
- return name;
- }
- name = %FunctionGetInferredName(GET_PRIVATE(this, CallSiteFunctionKey));
+ var fun = GET_PRIVATE(this, CallSiteFunctionKey);
+ var name = %FunctionGetDebugName(fun);
if (name) {
return name;
}
// Maybe this is an evaluation?
- var script = %FunctionGetScript(GET_PRIVATE(this, CallSiteFunctionKey));
+ var script = %FunctionGetScript(fun);
if (script && script.compilation_type == COMPILATION_TYPE_EVAL) {
return "eval";
}
@@ -966,12 +920,12 @@ function CallSiteToString() {
var methodName = this.getMethodName();
if (functionName) {
if (typeName &&
- %_CallFunction(functionName, typeName, StringIndexOfJS) != 0) {
+ %_CallFunction(functionName, typeName, $stringIndexOf) != 0) {
line += typeName + ".";
}
line += functionName;
if (methodName &&
- (%_CallFunction(functionName, "." + methodName, StringIndexOfJS) !=
+ (%_CallFunction(functionName, "." + methodName, $stringIndexOf) !=
functionName.length - methodName.length - 1)) {
line += " [as " + methodName + "]";
}
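A note on the SourceLocationRestrict deletion earlier in this file: it clamped a [start, end) source window around a position to at most `limit` characters, preferring context before the position. Since the patch removes it without replacement, here is the windowing arithmetic as standalone C++ (hypothetical names, same clamping rules as the removed function):

    #include <cstdio>

    struct Window { int start, end; };

    // Same rules as the removed SourceLocationRestrict: keep `before`
    // characters ahead of `position` when possible, never exceed `limit`.
    Window Restrict(int start, int end, int position, int limit = 78) {
      int before = (limit <= 20) ? limit / 2 : limit - 10;
      if (before >= limit) before = limit - 1;
      if (end - start > limit) {
        int start_limit = position - before;
        int end_limit = position + limit - before;
        if (start < start_limit && end_limit < end) {
          start = start_limit;
          end = end_limit;
        } else if (start < start_limit) {
          start = end - limit;
        } else {
          end = start + limit;
        }
      }
      return {start, end};
    }

    int main() {
      Window w = Restrict(0, 1000, 500);
      std::printf("[%d, %d) width %d\n", w.start, w.end, w.end - w.start);
      // [432, 510) width 78: 68 characters of context before the position.
    }
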
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 1fdb3e97e5..7b6b3f8c76 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -200,6 +200,39 @@ Address Assembler::break_address_from_return_address(Address pc) {
}
+void Assembler::set_target_internal_reference_encoded_at(Address pc,
+ Address target) {
+  // Encoded internal references are a lui/ori load of a 32-bit absolute
+  // address.
+ Instr instr_lui = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr_ori = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
+ DCHECK(Assembler::IsLui(instr_lui));
+ DCHECK(Assembler::IsOri(instr_ori));
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+ int32_t imm = reinterpret_cast<int32_t>(target);
+ DCHECK((imm & 3) == 0);
+ Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr_ori | (imm & kImm16Mask));
+
+  // Currently used only by the deserializer; all code will be flushed after
+  // deserialization completes, so there is no need to flush on each reference.
+}
+
+
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ DCHECK(IsLui(instr_at(pc)));
+ set_target_internal_reference_encoded_at(pc, target);
+ } else {
+ DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
+ Memory::Address_at(pc) = target;
+ }
+}
+
+
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -229,12 +262,35 @@ void RelocInfo::set_target_object(Object* target,
}
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::target_internal_reference() {
+ if (rmode_ == INTERNAL_REFERENCE) {
+ return Memory::Address_at(pc_);
+ } else {
+    // Encoded internal references are a lui/ori load of a 32-bit absolute
+    // address.
+ DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
+ Instr instr_lui = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
+ Instr instr_ori = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+ DCHECK(Assembler::IsLui(instr_lui));
+ DCHECK(Assembler::IsOri(instr_ori));
+ int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
+ imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ return reinterpret_cast<Address>(imm);
+ }
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -307,8 +363,8 @@ Address RelocInfo::call_address() {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ // debug-mips.cc BreakLocation::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocation::SetDebugBreakAtSlot().
return Assembler::target_address_at(pc_, host_);
}
@@ -317,8 +373,8 @@ void RelocInfo::set_call_address(Address target) {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ // debug-mips.cc BreakLocation::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocation::SetDebugBreakAtSlot().
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -346,11 +402,16 @@ void RelocInfo::set_call_object(Object* target) {
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) ||
- IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) ||
- IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, host_, NULL);
+ DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ if (IsInternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsInternalReferenceEncoded(rmode_)) {
+ Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
+ } else {
+ Assembler::set_target_address_at(pc_, host_, NULL);
+ }
}
@@ -383,6 +444,9 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
+ mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -408,6 +472,9 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
+ mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
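The MIPS changes above repeatedly split a 32-bit address across a lui/ori pair: the lui instruction carries the upper 16 bits of the immediate and the ori the lower 16. The pack/unpack arithmetic, isolated into portable C++ (names are illustrative; the real code masks these fields into full instruction words against kImm16Mask):

    #include <cassert>
    #include <cstdint>

    // Split a 32-bit value into the 16-bit immediates of a lui/ori pair,
    // mirroring set_target_internal_reference_encoded_at above.
    void Encode(uint32_t imm, uint32_t* lui_imm16, uint32_t* ori_imm16) {
      *lui_imm16 = (imm >> 16) & 0xFFFF;  // kLuiShift is 16
      *ori_imm16 = imm & 0xFFFF;
    }

    // Reassemble, mirroring RelocInfo::target_internal_reference.
    uint32_t Decode(uint32_t lui_imm16, uint32_t ori_imm16) {
      return (lui_imm16 << 16) | ori_imm16;
    }

    int main() {
      uint32_t lui, ori;
      Encode(0x12345678u, &lui, &ori);
      assert(lui == 0x1234 && ori == 0x5678);
      assert(Decode(lui, ori) == 0x12345678u);
    }
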
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e7cfd57006..c26a8514bb 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -40,7 +40,6 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/mips/assembler-mips-inl.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -214,27 +213,6 @@ bool RelocInfo::IsInConstantPool() {
}
-// Patch the code at the current address with the supplied instructions.
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED_MIPS();
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
@@ -663,14 +641,14 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
-int Assembler::target_at(int32_t pos, bool is_internal) {
+int Assembler::target_at(int pos, bool is_internal) {
Instr instr = instr_at(pos);
if (is_internal) {
if (instr == 0) {
return kEndOfChain;
} else {
int32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
- int32_t delta = instr_address - instr;
+ int delta = static_cast<int>(instr_address - instr);
DCHECK(pos > delta);
return pos - delta;
}
@@ -684,6 +662,8 @@ int Assembler::target_at(int32_t pos, bool is_internal) {
return (imm18 + pos);
}
}
+ // Check we have a branch or jump instruction.
+ DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
  // the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
@@ -711,7 +691,7 @@ int Assembler::target_at(int32_t pos, bool is_internal) {
DCHECK(pos > delta);
return pos - delta;
}
- } else if (IsJ(instr)) {
+ } else {
int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
if (imm28 == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
@@ -719,13 +699,10 @@ int Assembler::target_at(int32_t pos, bool is_internal) {
} else {
uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
instr_address &= kImm28Mask;
- int32_t delta = instr_address - imm28;
+ int delta = static_cast<int>(instr_address - imm28);
DCHECK(pos > delta);
return pos - delta;
}
- } else {
- UNREACHABLE();
- return 0;
}
}
@@ -747,6 +724,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
return;
}
+ DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
if (IsBranch(instr)) {
int32_t imm18 = target_pos - (pos + kBranchPCOffset);
DCHECK((imm18 & 3) == 0);
@@ -770,7 +748,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
instr_lui | ((imm & kHiMask) >> kLuiShift));
instr_at_put(pos + 1 * Assembler::kInstrSize,
instr_ori | (imm & kImm16Mask));
- } else if (IsJ(instr)) {
+ } else {
uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
imm28 &= kImm28Mask;
DCHECK((imm28 & 3) == 0);
@@ -780,8 +758,6 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos,
DCHECK(is_uint26(imm26));
instr_at_put(pos, instr | (imm26 & kImm26Mask));
- } else {
- UNREACHABLE();
}
}
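target_at and target_at_put treat a J-type jump's 26-bit immediate as a word offset within the current 256 MiB region: the immediate is shifted left by 2 to recover 28 bits, and the instruction's own address is masked to the same region before the delta is taken. In C++:

    #include <cassert>
    #include <cstdint>

    const uint32_t kImm26Mask = (1u << 26) - 1;
    const uint32_t kImm28Mask = (1u << 28) - 1;

    // Encode a jump target: its low 28 bits, stored as a word index (>> 2).
    uint32_t EncodeJTarget(uint32_t target) {
      assert((target & 3) == 0);  // targets are instruction-aligned
      return (target & kImm28Mask) >> 2;
    }

    // Decode as in Assembler::target_at: rebuild the 28-bit target and take
    // the delta against the instruction address within the 256 MiB region.
    int32_t DeltaFromJ(uint32_t instr_address, uint32_t imm26) {
      uint32_t imm28 = (imm26 & kImm26Mask) << 2;
      return static_cast<int32_t>((instr_address & kImm28Mask) - imm28);
    }

    int main() {
      uint32_t target = 0x00401000, pc = 0x00401040;
      uint32_t imm26 = EncodeJTarget(target);
      assert(DeltaFromJ(pc, imm26) == 0x40);  // pc - target, as in target_at
    }
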
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 89af82ad1a..d86f0d71fe 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -41,8 +41,8 @@
#include <set>
#include "src/assembler.h"
+#include "src/compiler.h"
#include "src/mips/constants-mips.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -546,6 +546,11 @@ class Assembler : public AssemblerBase {
target);
}
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -590,6 +595,8 @@ class Assembler : public AssemblerBase {
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceInstructions = 7;
+ static const int kJSReturnSequenceLength =
+ kJSReturnSequenceInstructions * kInstrSize;
static const int kDebugBreakSlotInstructions = 4;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -1020,7 +1027,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
+ void RecordDeoptReason(const int reason, const SourcePosition position);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
@@ -1129,10 +1136,10 @@ class Assembler : public AssemblerBase {
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
- int target_at(int32_t pos, bool is_internal);
+ int target_at(int pos, bool is_internal);
// Patch branch instruction at pos to branch to given branch target pos.
- void target_at_put(int32_t pos, int32_t target_pos, bool is_internal);
+ void target_at_put(int pos, int target_pos, bool is_internal);
// Say if we need to relocate with this mode.
bool MustUseReg(RelocInfo::Mode rmode);
@@ -1184,6 +1191,9 @@ class Assembler : public AssemblerBase {
}
private:
+ inline static void set_target_internal_reference_encoded_at(Address pc,
+ Address target);
+
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static const int kBufferCheckInterval = 1*KB/2;
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 42a0bbe58b..9bdc1e1bd8 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -941,7 +941,9 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Push function as parameter to the runtime call.
__ Push(a1, a1);
// Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+ __ LoadRoot(
+ at, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ push(at);
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
@@ -1349,49 +1351,100 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+ const int calleeOffset) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ Subu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
+ // Signed comparison.
+ __ Branch(&okay, gt, a2, Operand(t3));
+
+ // Out of stack space.
+ __ lw(a1, MemOperand(fp, calleeOffset));
+ __ Push(a1, v0);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+
+ __ bind(&okay);
+}
+
+
+static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int argumentsOffset,
+ const int indexOffset,
+ const int limitOffset) {
+ Label entry, loop;
+ __ lw(a0, MemOperand(fp, indexOffset));
+ __ Branch(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ lw(a1, MemOperand(fp, argumentsOffset));
+ __ Push(a1, a0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
+
+ // Use inline caching to access the arguments.
+ __ lw(a0, MemOperand(fp, indexOffset));
+ __ Addu(a0, a0, Operand(1 << kSmiTagSize));
+ __ sw(a0, MemOperand(fp, indexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ lw(a1, MemOperand(fp, limitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+
+ // On exit, the pushed arguments count is in a0, untagged
+ __ SmiUntag(a0);
+}
+
+
+// Used by FunctionApply and ReflectApply
+static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
+ const int kFormalParameters = targetIsArgument ? 3 : 2;
+ const int kStackSize = kFormalParameters + 1;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ const int kFunctionOffset = kReceiverOffset + kPointerSize;
+
__ lw(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
__ push(a0);
- __ lw(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ lw(a0, MemOperand(fp, kArgumentsOffset)); // Get the args array.
__ push(a0);
// Returns (in v0) number of arguments to copy to stack as Smi.
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
- // Make a2 the space we have left. The stack might already be overflowed
- // here which will cause a2 to become negative.
- __ subu(a2, sp, a2);
- // Check if the arguments will overflow the stack.
- __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
- __ Branch(&okay, gt, a2, Operand(t3)); // Signed comparison.
-
- // Out of stack space.
- __ lw(a1, MemOperand(fp, kFunctionOffset));
- __ Push(a1, v0);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
+ if (targetIsArgument) {
+ __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ } else {
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ }
+
+ // Returns the result in v0.
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current limit and index.
- __ bind(&okay);
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
__ mov(a1, zero_reg);
__ Push(v0, a1); // Limit and initial index.
// Get the receiver.
- __ lw(a0, MemOperand(fp, kRecvOffset));
+ __ lw(a0, MemOperand(fp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
@@ -1447,36 +1500,12 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(a0);
// Copy all arguments from the array to the stack.
- Label entry, loop;
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Branch(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // a0: current argument index
- __ bind(&loop);
- __ lw(a1, MemOperand(fp, kArgsOffset));
- __ Push(a1, a0);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(v0);
-
- // Use inline caching to access the arguments.
- __ lw(a0, MemOperand(fp, kIndexOffset));
- __ Addu(a0, a0, Operand(1 << kSmiTagSize));
- __ sw(a0, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ lw(a1, MemOperand(fp, kLimitOffset));
- __ Branch(&loop, ne, a0, Operand(a1));
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
ParameterCount actual(a0);
- __ sra(a0, a0, kSmiTagSize);
__ lw(a1, MemOperand(fp, kFunctionOffset));
__ GetObjectType(a1, a2, a2);
__ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -1485,7 +1514,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
frame_scope.GenerateLeaveFrame();
__ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+ __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
// Call the function proxy.
__ bind(&call_proxy);
@@ -1499,7 +1528,89 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
__ Ret(USE_DELAY_SLOT);
- __ Addu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+ __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
+}
+
+
+static void Generate_ConstructHelper(MacroAssembler* masm) {
+ const int kFormalParameters = 3;
+ const int kStackSize = kFormalParameters + 1;
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
+ const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+
+ // If newTarget is not supplied, set it to constructor
+ Label validate_arguments;
+ __ lw(a0, MemOperand(fp, kNewTargetOffset));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&validate_arguments, ne, a0, Operand(at));
+ __ lw(a0, MemOperand(fp, kFunctionOffset));
+ __ sw(a0, MemOperand(fp, kNewTargetOffset));
+
+ // Validate arguments
+ __ bind(&validate_arguments);
+ __ lw(a0, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kArgumentsOffset)); // get the args array
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target
+ __ push(a0);
+ // Returns argument count in v0.
+ __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+
+ // Returns result in v0.
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
+
+ // Push current limit and index.
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ __ push(v0); // limit
+ __ mov(a1, zero_reg); // initial index
+ __ push(a1);
+ // Push newTarget and callee functions
+ __ lw(a0, MemOperand(fp, kNewTargetOffset));
+ __ push(a0);
+ __ lw(a0, MemOperand(fp, kFunctionOffset));
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+
+ // Use undefined feedback vector
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ lw(a1, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ // Leave internal frame.
+ }
+ __ jr(ra);
+ __ Addu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, false);
+}
+
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, true);
+}
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ Generate_ConstructHelper(masm);
}
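Generate_CheckStackOverflow above compares the remaining stack (sp minus the real limit) against the space the smi-tagged argument count will need; the sll by kPointerSizeLog2 - kSmiTagSize converts a smi count directly into a byte count. The same check in C++ (assuming 32-bit MIPS conventions: 4-byte pointers, smis tagged by a one-bit shift):

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;       // smi value is stored shifted left by 1
    const int kPointerSizeLog2 = 2;  // 4-byte pointers on 32-bit MIPS

    // Mirrors Generate_CheckStackOverflow: ok iff sp - limit > bytes needed.
    bool StackHasRoom(uintptr_t sp, uintptr_t real_stack_limit,
                      int32_t smi_argc) {
      // May be negative if the stack has already overflowed.
      intptr_t space_left = static_cast<intptr_t>(sp - real_stack_limit);
      // sll by (kPointerSizeLog2 - kSmiTagSize): smi(argc) * 2 == argc * 4.
      int32_t bytes_needed = smi_argc << (kPointerSizeLog2 - kSmiTagSize);
      return space_left > bytes_needed;  // signed comparison, as in the stub
    }

    int main() {
      uintptr_t limit = 0x1000, sp = 0x2000;  // 4096 bytes of headroom
      assert(StackHasRoom(sp, limit, 1000 << kSmiTagSize));   // needs 4000
      assert(!StackHasRoom(sp, limit, 1024 << kSmiTagSize));  // needs 4096
    }
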
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index c4fc383552..837b5a495b 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -12,6 +12,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -1121,13 +1122,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ LoadRoot(t0, Heap::kExceptionRootIndex);
__ Branch(&exception_returned, eq, t0, Operand(v0));
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
-
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
__ li(a2, Operand(pending_exception_address));
__ lw(a2, MemOperand(a2));
__ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
@@ -1147,25 +1147,52 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
- // Retrieve the pending exception.
- __ li(a2, Operand(pending_exception_address));
- __ lw(v0, MemOperand(a2));
-
- // Clear the pending exception.
- __ li(a3, Operand(isolate()->factory()->the_hole_value()));
- __ sw(a3, MemOperand(a2));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- Label throw_termination_exception;
- __ LoadRoot(t0, Heap::kTerminationExceptionRootIndex);
- __ Branch(&throw_termination_exception, eq, v0, Operand(t0));
-
- // Handle normal exception.
- __ Throw(v0);
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set v0 to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, a0);
+ __ mov(a0, zero_reg);
+ __ mov(a1, zero_reg);
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(find_handler, 3);
+ }
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(v0);
+ // Retrieve the handler context, SP and FP.
+ __ li(cp, Operand(pending_handler_context_address));
+ __ lw(cp, MemOperand(cp));
+ __ li(sp, Operand(pending_handler_sp_address));
+ __ lw(sp, MemOperand(sp));
+ __ li(fp, Operand(pending_handler_fp_address));
+ __ lw(fp, MemOperand(fp));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (cp == 0) for non-JS frames.
+ Label zero;
+ __ Branch(&zero, eq, cp, Operand(zero_reg));
+ __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&zero);
+
+ // Compute the handler entry address and jump to it.
+ __ li(a1, Operand(pending_handler_code_address));
+ __ lw(a1, MemOperand(a1));
+ __ li(a2, Operand(pending_handler_offset_address));
+ __ lw(a2, MemOperand(a2));
+ __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Addu(t9, a1, a2);
+ __ Jump(t9);
}
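
For orientation: the last three instructions turn a tagged Code pointer plus a byte offset into a raw entry PC. A minimal standalone sketch of that arithmetic (Code::kHeaderSize is shown as a placeholder constant; only kHeapObjectTag == 1 is a real V8 invariant here):

    #include <cstdint>
    using Address = uintptr_t;
    // t9 = a1 + a2 in the stub: untag the Code pointer, skip the object
    // header to reach the instruction stream, then add the handler offset.
    Address ComputeHandlerEntry(Address tagged_code, uintptr_t handler_offset) {
      const uintptr_t kHeapObjectTag = 1;    // low tag bit on heap pointers
      const uintptr_t kCodeHeaderSize = 64;  // placeholder for Code::kHeaderSize
      return tagged_code - kHeapObjectTag + kCodeHeaderSize + handler_offset;
    }
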
@@ -1252,7 +1279,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
@@ -1261,10 +1288,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bal(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
@@ -1309,7 +1335,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Call(t9);
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ bind(&exit); // v0 holds result
// Check if the current stack frame is marked as the outermost JS frame.
@@ -1352,12 +1378,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ !scratch.is(VectorLoadICDescriptor::VectorRegister()));
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1368,7 +1390,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1938,8 +1960,11 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ lw(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
+ Label skip_decrement;
+ __ Branch(&skip_decrement, eq, a1, Operand(Smi::FromInt(0)));
// Subtract 1 from smi-tagged arguments count.
__ Subu(a1, a1, Operand(2));
+ __ bind(&skip_decrement);
}
__ sw(a1, MemOperand(sp, 0));
__ sll(at, a1, kPointerSizeLog2 - kSmiTagSize);
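
The branch above guards the subtraction: with a one-bit smi tag, Smi::FromInt(1) is the machine word 2, so Subu(a1, a1, Operand(2)) drops exactly one tagged argument, and an adaptor frame reporting zero arguments must not underflow. A toy model of the same logic:

    #include <cstdint>
    // smi_count is an argument count in 32-bit smi form (n << 1).
    int32_t AdjustForNewTarget(int32_t smi_count) {
      if (smi_count == 0) return 0;  // skip_decrement: nothing to drop
      return smi_count - 2;          // subtract Smi::FromInt(1): n - 1, tagged
    }
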
@@ -2054,7 +2079,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2338,17 +2363,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
- __ sw(a1, MemOperand(a2, 0)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
- Label termination_exception;
- __ Branch(&termination_exception, eq, v0, Operand(a0));
-
- __ Throw(v0);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(v0);
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ bind(&failure);
// For failure and exception return null.
@@ -2444,7 +2460,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3031,7 +3047,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
+ MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@@ -3045,7 +3061,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
// Consumed by runtime conversion function:
- __ Push(object_, index_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ } else {
+ __ Push(object_, index_);
+ }
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@@ -3056,9 +3077,13 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
-
__ Move(index_, v0);
- __ pop(object_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister(), object_);
+ } else {
+ __ pop(object_);
+ }
// Reload the instance type.
__ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
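
The new embed_mode parameter exists because the runtime call in this slow path clobbers the vector and slot registers that an enclosing vector-IC handler still needs; they are therefore spilled with the operands and restored afterwards. A toy model of the spill set (names are illustrative, not V8 API):

    #include <string>
    #include <vector>
    enum EmbedMode { NOT_PART_OF_IC_HANDLER, PART_OF_IC_HANDLER };
    // Returns, in push order, what the slow path saves around the call.
    std::vector<std::string> SpilledAroundRuntimeCall(EmbedMode mode,
                                                      bool vector_ics) {
      if (vector_ics && mode == PART_OF_IC_HANDLER)
        return {"vector", "slot", "object", "index"};
      return {"object", "index"};
    }
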
@@ -3385,7 +3410,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// v0: original string
@@ -3579,7 +3604,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@@ -3889,7 +3914,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ bind(&miss);
@@ -4529,15 +4554,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawLoadStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawKeyedLoadStub stub(isolate());
+ stub.GenerateForTrampoline(masm);
}
@@ -4555,6 +4580,243 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
+void VectorRawLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register feedback, Register scratch1,
+ Register scratch2, Register scratch3,
+ bool is_polymorphic, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ Register receiver_map = scratch1;
+ Register cached_map = scratch2;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&compare_map);
+ __ lw(cached_map,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+ __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
+  // Found the map; now call its handler.
+ Register handler = feedback;
+ __ lw(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+
+ Register length = scratch3;
+ __ bind(&start_polymorphic);
+ __ lw(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+ if (!is_polymorphic) {
+ // If the IC could be monomorphic we have to make sure we don't go past the
+ // end of the feedback array.
+ __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
+ }
+
+ Register too_far = length;
+ Register pointer_reg = feedback;
+
+ // +-----+------+------+-----+-----+ ... ----+
+ // | map | len | wm0 | h0 | wm1 | hN |
+ // +-----+------+------+-----+-----+ ... ----+
+ // 0 1 2 len-1
+ // ^ ^
+ // | |
+ // pointer_reg too_far
+ // aka feedback scratch3
+ // also need receiver_map (aka scratch1)
+ // use cached_map (scratch2) to look in the weak map values.
+ __ sll(at, length, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(too_far, feedback, Operand(at));
+ __ Addu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Addu(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ lw(cached_map, MemOperand(pointer_reg));
+ __ lw(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
+ __ lw(handler, MemOperand(pointer_reg, kPointerSize));
+ __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ __ bind(&prepare_next);
+ __ Addu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
+ __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
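
The diagram above describes a flat array of (weak map, handler) pairs. A standalone sketch of the search the loop performs (Map and Code are opaque stand-ins; the assembly unrolls the first pair as a fast path before entering the loop at element 2):

    #include <vector>
    struct Map;
    struct Code;
    struct MapHandlerPair { Map* cached_map; Code* handler; };
    Code* FindPolymorphicHandler(const std::vector<MapHandlerPair>& feedback,
                                 Map* receiver_map) {
      for (const MapHandlerPair& p : feedback) {
        if (p.cached_map == receiver_map) return p.handler;  // tail-jump target
      }
      return nullptr;  // exhausted the pairs: fall through to the miss label
    }
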
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register weak_cell, Register scratch,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label compare_smi_map;
+ Register receiver_map = scratch;
+ Register cached_map = weak_cell;
+
+ // Move the weak map into the weak_cell register.
+ __ lw(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Branch(miss, ne, cached_map, Operand(receiver_map));
+
+ Register handler = weak_cell;
+ __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(handler, vector, Operand(at));
+ __ lw(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ __ bind(&compare_smi_map);
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(miss, ne, at, Operand(weak_cell));
+ __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(handler, vector, Operand(at));
+ __ lw(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ Addu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+}
+
+
+void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
+ Register name = VectorLoadICDescriptor::NameRegister(); // a2
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
+ Register feedback = t0;
+ Register scratch1 = t1;
+
+ __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(feedback, vector, Operand(at));
+ __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
+ __ Branch(&try_array, ne, at, Operand(scratch1));
+ HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
+ &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&not_array, ne, at, Operand(scratch1));
+ HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, t4,
+ t5, true, &miss);
+
+ __ bind(&not_array);
+ __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+ __ Branch(&miss, ne, at, Operand(feedback));
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ false, receiver, name, feedback,
+ scratch1, t4, t5);
+
+ __ bind(&miss);
+ LoadIC::GenerateMiss(masm);
+}
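
GenerateImpl is a four-way dispatch on what the feedback slot holds. A standalone sketch of the decision (the enum is a stand-in for the real object type checks):

    enum class Feedback { WeakCell, FixedArray, MegamorphicSymbol, Other };
    enum class Path { Monomorphic, Polymorphic, ProbeStubCache, Miss };
    Path DispatchLoad(Feedback f) {
      switch (f) {
        case Feedback::WeakCell:          return Path::Monomorphic;
        case Feedback::FixedArray:        return Path::Polymorphic;
        case Feedback::MegamorphicSymbol: return Path::ProbeStubCache;
        default:                          return Path::Miss;  // LoadIC::GenerateMiss
      }
    }
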
+
+
+void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
+ Register key = VectorLoadICDescriptor::NameRegister(); // a2
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
+ Register feedback = t0;
+ Register scratch1 = t1;
+
+ __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(feedback, vector, Operand(at));
+ __ lw(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ lw(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
+ __ Branch(&try_array, ne, at, Operand(scratch1));
+ __ JumpIfNotSmi(key, &miss);
+ HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
+ &miss);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&not_array, ne, at, Operand(scratch1));
+ // We have a polymorphic element handler.
+ __ JumpIfNotSmi(key, &miss);
+
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, t4,
+ t5, true, &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+ __ Branch(&try_poly_name, ne, at, Operand(feedback));
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Branch(&miss, ne, key, Operand(feedback));
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ sll(at, slot, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(feedback, vector, Operand(at));
+ __ lw(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, t4,
+ t5, false, &miss);
+
+ __ bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
+}
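
The keyed variant layers two extra rules on the same dispatch: a smi key is required for the monomorphic and element-handler paths, and a Name in the feedback slot that equals the key means the next vector slot holds the map/handler array. A sketch under the same stand-in types as above:

    enum class KeyedFeedback { WeakCell, FixedArray, MegamorphicSymbol,
                               NameEqualToKey, Other };
    enum class KeyedPath { Monomorphic, PolymorphicElements, MegamorphicStub,
                           PolymorphicName /* handlers live at slot + 1 */,
                           Miss };
    KeyedPath DispatchKeyedLoad(KeyedFeedback f, bool key_is_smi) {
      switch (f) {
        case KeyedFeedback::WeakCell:
          return key_is_smi ? KeyedPath::Monomorphic : KeyedPath::Miss;
        case KeyedFeedback::FixedArray:
          return key_is_smi ? KeyedPath::PolymorphicElements : KeyedPath::Miss;
        case KeyedFeedback::MegamorphicSymbol:
          return KeyedPath::MegamorphicStub;
        case KeyedFeedback::NameEqualToKey:
          return KeyedPath::PolymorphicName;
        default:
          return KeyedPath::Miss;
      }
    }
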
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -5011,7 +5273,6 @@ static void CallApiFunctionAndReturn(
}
Label promote_scheduled_exception;
- Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
@@ -5032,13 +5293,8 @@ static void CallApiFunctionAndReturn(
__ lw(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
- // Check if the function scheduled an exception.
+ // Leave the API exit frame.
__ bind(&leave_exit_frame);
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
- __ lw(t1, MemOperand(at));
- __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
- __ bind(&exception_handled);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
@@ -5051,16 +5307,20 @@ static void CallApiFunctionAndReturn(
} else {
__ li(s0, Operand(stack_space));
}
- __ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN,
+ __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
stack_space_offset != kInvalidStackOffset);
+ // Check if the function scheduled an exception.
+ __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+ __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ lw(t1, MemOperand(at));
+ __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
- }
- __ jmp(&exception_handled);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
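
The reordering matters: the exit frame is now torn down before the scheduled-exception check, so the re-throw can simply tail-call the runtime from the caller's frame instead of building an internal frame and jumping back. A control-flow sketch with placeholder helpers:

    void LeaveExitFrame();
    bool HasScheduledException();  // scheduled_exception != the_hole
    [[noreturn]] void TailCallPromoteScheduledException();
    void ApiCallEpilogue() {
      LeaveExitFrame();                       // NO_EMIT_RETURN keeps ra live
      if (HasScheduledException())
        TailCallPromoteScheduledException();  // Runtime::kPromoteScheduledException
      // __ Ret(): the normal return path
    }
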
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 5b3591c81b..a14fac8d06 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -14,12 +14,7 @@
namespace v8 {
namespace internal {
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
+void BreakLocation::SetDebugBreakAtReturn() {
// Mips return sequence:
// mov sp, fp
// lw fp, sp(0)
@@ -31,7 +26,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
  // Make sure this constant matches the number of instructions we emit.
DCHECK(Assembler::kJSReturnSequenceInstructions == 7);
- CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
// li and Call pseudo-instructions emit two instructions each.
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())));
@@ -45,29 +40,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
}
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceInstructions);
-}
-
-
-// A debug break in the exit code is identified by the JS frame exit code
-// having been patched with li/call pseudo-instructions (lui/ori/jalr).
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
+void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
@@ -77,20 +50,13 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// to a call to the debug break slot code.
// li t9, address (lui t9 / ori t9 instruction pair)
// call t9 (jalr t9 / nop instruction pair)
- CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())));
patcher.masm()->Call(v8::internal::t9);
}
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 3dfc64a605..ab237c35ce 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -132,7 +132,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Unlike on ARM we don't save all the registers, just the useful ones.
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 5666f642f9..633a887b5b 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -205,11 +205,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-inline void StackHandler::SetFp(Address slot, Address fp) {
- Memory::Address_at(slot) = fp;
-}
-
-
} } // namespace v8::internal
#endif
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index ee8475a9eb..9f3655232b 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -115,7 +115,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
@@ -204,7 +205,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
bool need_write_barrier = true;
- if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ if (info->scope()->is_script_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
@@ -249,6 +250,11 @@ void FullCodeGenerator::Generate() {
}
}
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@@ -257,6 +263,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
+ if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
+ --num_parameters;
+ ++rest_index;
+ }
+
__ Addu(a3, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ li(a2, Operand(Smi::FromInt(num_parameters)));
@@ -291,10 +302,6 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
  // The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
@@ -1513,7 +1520,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ li(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
- CallLoadIC(CONTEXTUAL);
+ CallGlobalLoadIC(var->name());
context()->Plug(v0);
break;
}
@@ -2161,7 +2168,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
__ mov(a0, v0);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
__ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter
__ Push(load_name, a3, a0); // "throw", iter, except
@@ -2172,17 +2178,18 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(a0); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
+ EnterTryBlock(expr->index(), &l_catch);
+ const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(a0); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ mov(a0, v0);
__ jmp(&l_resume);
__ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
+ const int generator_object_depth = kPointerSize + try_block_size;
__ lw(a0, MemOperand(sp, generator_object_depth));
__ push(a0); // g
+ __ Push(Smi::FromInt(expr->index())); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
__ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
@@ -2190,13 +2197,13 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a1, cp);
__ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
kRAHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(v0); // result
EmitReturnSequence();
__ mov(a0, v0);
__ bind(&l_resume); // received in a0
- __ PopTryHandler();
+ ExitTryBlock(expr->index());
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2544,6 +2551,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
__ push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+  // The static prototype property is read-only. The non-computed property
+  // name case is handled in the parser. Since this is the only case where an
+  // own read-only property needs checking, we special-case it rather than
+  // doing the check for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ push(v0);
+ }
+
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@@ -2689,24 +2706,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ lw(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ li(a0, Operand(var->name()));
- __ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, a1);
- __ lw(a2, location);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a2, Operand(at));
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2722,6 +2721,22 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, a1);
+ __ lw(a3, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&const_error, ne, a3, Operand(at));
+ __ li(a3, Operand(var->name()));
+ __ push(a3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2742,8 +2757,31 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ li(a0, Operand(var->name()));
+ __ Push(v0, cp, a0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, a1);
+ __ lw(a2, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(at));
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ }
+ // Silently ignore store in sloppy mode.
}
}
@@ -2877,7 +2915,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ Push(isolate()->factory()->undefined_value());
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ push(at);
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -3229,7 +3268,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- if (!ValidateSuperCall(expr)) return;
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
@@ -3739,9 +3777,10 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset));
- __ GetObjectType(v0, a1, a1);
- __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+ Register instance_type = a2;
+ __ GetMapConstructor(v0, v0, a1, instance_type);
+ __ Branch(&non_function_constructor, ne, instance_type,
+ Operand(JS_FUNCTION_TYPE));
// v0 now contains the constructor function. Grab the
// instance class name from there.
@@ -4052,7 +4091,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4101,7 +4140,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4287,7 +4326,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ bind(&done);
context()->Plug(v0);
@@ -4573,18 +4612,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- if (expr->function() != NULL &&
- expr->function()->intrinsic_type == Runtime::INLINE) {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
__ lw(receiver, GlobalObjectOperand());
@@ -4607,7 +4639,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ sw(v0, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
- int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@@ -4622,15 +4653,29 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, v0);
+
} else {
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(v0);
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(v0);
+ }
+ }
}
}
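
The switch is generated by the preprocessor; for a representative intrinsic name such as IsSmi (assumed to appear in FOR_EACH_FULL_CODE_INTRINSIC), one expansion of CALL_INTRINSIC_GENERATOR reads:

    case Runtime::kInlineIsSmi: {
      Comment cmnt(masm_, "[ InlineIsSmi");
      return EmitIsSmi(expr);
    }
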
@@ -5273,19 +5318,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ li(at, Operand(pending_message_obj));
__ lw(a1, MemOperand(at));
__ push(a1);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ li(at, Operand(has_pending_message));
- __ lw(a1, MemOperand(at));
- __ SmiTag(a1);
- __ push(a1);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ li(at, Operand(pending_message_script));
- __ lw(a1, MemOperand(at));
- __ push(a1);
}
@@ -5293,19 +5325,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(a1));
// Restore pending message from stack.
__ pop(a1);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ li(at, Operand(pending_message_script));
- __ sw(a1, MemOperand(at));
-
- __ pop(a1);
- __ SmiUntag(a1);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ li(at, Operand(has_pending_message));
- __ sw(a1, MemOperand(at));
-
- __ pop(a1);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ li(at, Operand(pending_message_obj));
@@ -5325,34 +5344,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ lw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ Call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc,
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index b8cae81e0d..ec6c63f833 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -227,6 +227,12 @@ void InternalArrayConstructorDescriptor::Initialize(
}
+void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, a0};
data->Initialize(arraysize(registers), registers, NULL);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 0dea629d3a..d7b3511888 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -30,6 +30,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -142,7 +143,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
+ if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -346,50 +347,39 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
- if (needs_frame.is_bound()) {
- __ Branch(&needs_frame);
- } else {
- __ bind(&needs_frame);
- Comment(";;; call deopt with frame");
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(at);
- __ Addu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ bind(&call_deopt_entry);
- // Add the base address to the offset previously loaded in
- // entry_offset.
- __ Addu(entry_offset, entry_offset,
- Operand(ExternalReference::ForDeoptEntry(base)));
- __ Call(entry_offset);
- }
+ Comment(";;; call deopt with frame");
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ Call(&needs_frame);
} else {
- // The last entry can fall through into `call_deopt_entry`, avoiding a
- // branch.
- bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
-
- if (need_branch) __ Branch(&call_deopt_entry);
+ __ Call(&call_deopt_entry);
}
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
}
- if (!call_deopt_entry.is_bound()) {
- Comment(";;; call deopt");
- __ bind(&call_deopt_entry);
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(at);
+ __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ }
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
- // Add the base address to the offset previously loaded in entry_offset.
- __ Addu(entry_offset, entry_offset,
- Operand(ExternalReference::ForDeoptEntry(base)));
- __ Call(entry_offset);
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
}
+
+ // Add the base address to the offset previously loaded in entry_offset.
+ __ Addu(entry_offset, entry_offset,
+ Operand(ExternalReference::ForDeoptEntry(base)));
+ __ Jump(entry_offset);
}
__ RecordComment("]");
@@ -856,8 +846,8 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ bind(&skip);
}
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
@@ -865,6 +855,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
@@ -2656,14 +2647,15 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+ Register instance_type = scratch1();
+ DCHECK(!instance_type.is(temp));
+ __ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
- __ GetObjectType(temp, temp2, temp2);
if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
} else {
- __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
}
// temp now contains the constructor function. Grab the
@@ -2769,8 +2761,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// root array to force relocation to be able to later patch with
// the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ li(at, Operand(Handle<Object>(cell)));
- __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
+ __ li(at, Operand(cell));
+ __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
__ BranchShort(&cache_miss, ne, map, Operand(at));
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
@@ -2910,17 +2902,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
- __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
- }
-}
-
-
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@@ -2950,37 +2931,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = scratch0();
-
- // Load the cell.
- __ li(cell, Operand(instr->hydrogen()->cell().handle()));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We use a temp to check the payload.
- Register payload = ToRegister(instr->temp());
- __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, payload, Operand(at));
- }
-
- // Store the value.
- __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-}
-
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3077,8 +3033,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL,
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3388,7 +3345,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4280,7 +4239,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
+ Handle<Code> ic =
+ StoreIC::initialize_stub(isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4511,8 +4472,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5250,7 +5212,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ li(at, Operand(Handle<Object>(cell)));
+ __ li(at, Operand(cell));
__ lw(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
} else {
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 8d1b45fa30..b710ef9fde 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -2087,14 +2087,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object =
@@ -2109,16 +2101,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to check the value in the cell in the case where we perform
- // a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 1ccba14bc8..ede4cbed91 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@@ -141,7 +140,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1667,13 +1665,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@@ -1695,21 +1686,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 972530ee35..406348f2f5 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -86,6 +86,7 @@ void MacroAssembler::LoadRoot(Register destination,
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
sw(source, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -94,6 +95,7 @@ void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Branch(2, NegateCondition(cond), src1, src2);
sw(source, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -1549,6 +1551,18 @@ void MacroAssembler::BranchF(Label* target,
}
+void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
+ if (IsFp64Mode()) {
+ DCHECK(!src_low.is(at));
+ mfhc1(at, dst);
+ mtc1(src_low, dst);
+ mthc1(at, dst);
+ } else {
+ mtc1(src_low, dst);
+ }
+}
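
In FP64 mode each FPU register is a full 64 bits, so a 32-bit mtc1 must not be allowed to disturb the upper word; the high half is parked in at across the write. A toy bit-level model:

    #include <cstdint>
    uint64_t FmoveLowModel(uint64_t fpr, uint32_t src_low) {
      uint32_t high = static_cast<uint32_t>(fpr >> 32);  // mfhc1 at, dst
      fpr = src_low;                                     // mtc1 src_low, dst
      return (static_cast<uint64_t>(high) << 32) | fpr;  // mthc1 at, dst
    }
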
+
+
void MacroAssembler::Move(FPURegister dst, float imm) {
li(at, Operand(bit_cast<int32_t>(imm)));
mtc1(at, dst);
@@ -3221,47 +3235,22 @@ void MacroAssembler::DebugBreak() {
// ---------------------------------------------------------------------------
// Exception handling.
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
+void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // For the JSEntry handler, we must preserve a0-a3 and s0.
- // t1-t3 are available. We will build up the handler from the bottom by
- // pushing on the stack.
- // Set up the code object (t1) and the state (t2) for pushing.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- li(t1, Operand(CodeObject()), CONSTANT_SIZE);
- li(t2, Operand(state));
-
- // Push the frame pointer, context, state, and code object.
- if (kind == StackHandler::JS_ENTRY) {
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- // The second zero_reg indicates no context.
- // The first zero_reg is the NULL frame pointer.
- // The operands are reversed to match the order of MultiPush/Pop.
- Push(zero_reg, zero_reg, t2, t1);
- } else {
- MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
- }
// Link the current handler as the next handler.
li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
lw(t1, MemOperand(t2));
push(t1);
+
// Set this new handler as the current one.
sw(sp, MemOperand(t2));
}
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
@@ -3270,101 +3259,6 @@ void MacroAssembler::PopTryHandler() {
}
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // v0 = exception, a1 = code object, a2 = state.
- lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table.
- Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- srl(a2, a2, StackHandler::kKindWidth); // Handler index.
- sll(a2, a2, kPointerSizeLog2);
- Addu(a2, a3, a2);
- lw(a2, MemOperand(a2)); // Smi-tagged offset.
- Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- sra(t9, a2, kSmiTagSize);
- Addu(t9, t9, a1);
- Jump(t9); // Jump.
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in v0.
- Move(v0, value);
-
- // Drop the stack pointer to the top of the top handler.
- li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
- isolate())));
- lw(sp, MemOperand(a3));
-
- // Restore the next handler.
- pop(a2);
- sw(a2, MemOperand(a3));
-
- // Get the code object (a1) and state (a2). Restore the context and frame
- // pointer.
- MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
- // or cp.
- Label done;
- Branch(&done, eq, cp, Operand(zero_reg));
- sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- bind(&done);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in v0.
- if (!value.is(v0)) {
- mov(v0, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- lw(sp, MemOperand(a3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind);
- bind(&fetch_next);
- lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
- And(a2, a2, Operand(StackHandler::KindField::kMask));
- Branch(&fetch_next, ne, a2, Operand(zero_reg));
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(a2);
- sw(a2, MemOperand(a3));
-
- // Get the code object (a1) and state (a2). Clear the context and frame
- // pointer (0 was saved in the handler).
- MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
-
- JumpToHandlerEntry();
-}
-
-
void MacroAssembler::Allocate(int object_size,
Register result,
Register scratch1,
@@ -4321,6 +4215,20 @@ void MacroAssembler::IsObjectNameType(Register object,
// Support functions.
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp, Register temp2) {
+ Label done, loop;
+ lw(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
+ bind(&loop);
+ JumpIfSmi(result, &done);
+ GetObjectType(result, temp, temp2);
+ Branch(&done, ne, temp2, Operand(MAP_TYPE));
+ lw(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
+ Branch(&loop);
+ bind(&done);
+}
+
+
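Note: the loop above walks the map's constructor-or-back-pointer chain until it reaches something that is not a map. A minimal C++ sketch of the same walk, assuming the usual Map/Object heap interfaces (the accessor name is hypothetical):

    // Sketch only: follow constructor-or-back-pointer links until the value is
    // no longer a Map; what remains is the constructor (or a smi sentinel).
    Object* GetMapConstructorSketch(Map* map) {
      Object* result = map->constructor_or_back_pointer();  // hypothetical accessor
      while (!result->IsSmi() && result->IsMap()) {
        result = Map::cast(result)->constructor_or_back_pointer();
      }
      return result;
    }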
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -4373,7 +4281,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+ GetMapConstructor(result, result, scratch, scratch);
}
// All done.
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 02845e2bbd..481c7d4a52 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -241,10 +241,16 @@ class MacroAssembler: public Assembler {
Mfhc1(dst_high, src);
}
+ inline void FmoveHigh(FPURegister dst, Register src_high) {
+ Mthc1(src_high, dst);
+ }
+
inline void FmoveLow(Register dst_low, FPURegister src) {
mfc1(dst_low, src);
}
+ void FmoveLow(FPURegister dst, Register src_low);
+
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
Mthc1(src_high, dst);
@@ -963,19 +969,12 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// Exception handling.
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
+ // Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
- void PopTryHandler();
-
- // Passes thrown value to the handler of top of the try handler chain.
- void Throw(Register value);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain.
- void ThrowUncatchable(Register value);
+ void PopStackHandler();
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -998,6 +997,11 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// Support functions.
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done, and |temp2| its instance type.
+ void GetMapConstructor(Register result, Register map, Register temp,
+ Register temp2);
+
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
@@ -1649,10 +1653,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register bitmap_reg,
Register mask_reg);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 79f337d3df..7ea3841935 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1935,7 +1935,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// Logical right-rotate of a word by a fixed number of bits. This
        // is a special case of the SRL instruction, added in MIPS32 Release 2.
// RS field is equal to 00001.
- *alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ *alu_out = base::bits::RotateRight32(rt_u, sa);
}
break;
case SRA:
@@ -1953,7 +1953,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// Logical right-rotate of a word by a variable number of bits.
        // This is a special case of the SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
- *alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ *alu_out = base::bits::RotateRight32(rt_u, rs_u);
}
break;
case SRAV:
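Worth noting for the two hunks above: the open-coded form (rt_u >> sa) | (rt_u << (32 - sa)) shifts by 32 when the rotate amount is 0, which is undefined behavior in C++. A sketch of the semantics the base::bits::RotateRight32 helper is assumed to provide:

    // Masked shifts make the zero-rotate case well defined: rotr(v, 0) == v.
    static inline uint32_t RotateRight32Sketch(uint32_t v, uint32_t sa) {
      return (v >> (sa & 31)) | (v << ((32 - sa) & 31));
    }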
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index dede337e2a..76117d08e3 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -194,6 +194,43 @@ Address Assembler::break_address_from_return_address(Address pc) {
}
+void Assembler::set_target_internal_reference_encoded_at(Address pc,
+ Address target) {
+  // Encoded internal references are a lui/ori load of a 48-bit absolute address.
+ Instr instr_lui = Assembler::instr_at(pc + 0 * Assembler::kInstrSize);
+ Instr instr_ori = Assembler::instr_at(pc + 1 * Assembler::kInstrSize);
+ Instr instr_ori2 = Assembler::instr_at(pc + 3 * Assembler::kInstrSize);
+ DCHECK(Assembler::IsLui(instr_lui));
+ DCHECK(Assembler::IsOri(instr_ori));
+ DCHECK(Assembler::IsOri(instr_ori2));
+ instr_lui &= ~kImm16Mask;
+ instr_ori &= ~kImm16Mask;
+ instr_ori2 &= ~kImm16Mask;
+ int64_t imm = reinterpret_cast<int64_t>(target);
+ DCHECK((imm & 3) == 0);
+ Assembler::instr_at_put(pc + 0 * Assembler::kInstrSize,
+ instr_lui | ((imm >> 32) & kImm16Mask));
+ Assembler::instr_at_put(pc + 1 * Assembler::kInstrSize,
+ instr_ori | ((imm >> 16) & kImm16Mask));
+ Assembler::instr_at_put(pc + 3 * Assembler::kInstrSize,
+                          instr_ori2 | (imm & kImm16Mask));
+  // Currently used only by the deserializer; all code is flushed after
+  // deserialization completes, so there is no need to flush on each reference.
+}
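The three immediates written above carry bits 47..32, 31..16, and 15..0 of the target address; a small self-contained check of the split and reassembly (plain C++, no V8 types):

    #include <cstdint>
    int main() {
      uint64_t imm = 0x0000123456789ABCULL;       // example 48-bit address
      uint16_t lui_imm  = (imm >> 32) & 0xFFFF;   // bits 47..32
      uint16_t ori_imm  = (imm >> 16) & 0xFFFF;   // bits 31..16
      uint16_t ori2_imm = imm & 0xFFFF;           // bits 15..0
      uint64_t back = (uint64_t{lui_imm} << 32) | (uint64_t{ori_imm} << 16) |
                      ori2_imm;
      return back == imm ? 0 : 1;                 // round-trips exactly
    }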
+
+
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ DCHECK(IsLui(instr_at(pc)));
+ set_target_internal_reference_encoded_at(pc, target);
+ } else {
+ DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
+ Memory::Address_at(pc) = target;
+ }
+}
+
+
Object* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
@@ -223,12 +260,38 @@ void RelocInfo::set_target_object(Object* target,
}
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
+Address RelocInfo::target_internal_reference() {
+ if (rmode_ == INTERNAL_REFERENCE) {
+ return Memory::Address_at(pc_);
+ } else {
+    // Encoded internal references are a lui/ori load of a 48-bit absolute address.
+ DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
+ Instr instr_lui = Assembler::instr_at(pc_ + 0 * Assembler::kInstrSize);
+ Instr instr_ori = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+ Instr instr_ori2 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize);
+ DCHECK(Assembler::IsLui(instr_lui));
+ DCHECK(Assembler::IsOri(instr_ori));
+ DCHECK(Assembler::IsOri(instr_ori2));
+ int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 32;
+ imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 16;
+ imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask));
+ return reinterpret_cast<Address>(imm);
+ }
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@@ -301,8 +364,8 @@ Address RelocInfo::call_address() {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ // debug-mips.cc BreakLocation::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocation::SetDebugBreakAtSlot().
return Assembler::target_address_at(pc_, host_);
}
@@ -311,8 +374,8 @@ void RelocInfo::set_call_address(Address target) {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes mips patched return sequence per
- // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
- // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ // debug-mips.cc BreakLocation::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocation::SetDebugBreakAtSlot().
Assembler::set_target_address_at(pc_, host_, target);
if (host() != NULL) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -340,11 +403,16 @@ void RelocInfo::set_call_object(Object* target) {
void RelocInfo::WipeOut() {
- DCHECK(IsEmbeddedObject(rmode_) ||
- IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) ||
- IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, host_, NULL);
+ DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ if (IsInternalReference(rmode_)) {
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsInternalReferenceEncoded(rmode_)) {
+ Assembler::set_target_internal_reference_encoded_at(pc_, nullptr);
+ } else {
+ Assembler::set_target_address_at(pc_, host_, NULL);
+ }
}
@@ -380,6 +448,9 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
+ mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -405,6 +476,9 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
+ mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 4ce970da33..9501371b63 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -32,14 +32,12 @@
// modified significantly by Google Inc.
// Copyright 2012 the V8 project authors. All rights reserved.
-
#include "src/v8.h"
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
#include "src/mips64/assembler-mips64-inl.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -192,27 +190,6 @@ bool RelocInfo::IsInConstantPool() {
}
-// Patch the code at the current address with the supplied instructions.
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED_MIPS();
-}
-
-
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
@@ -635,7 +612,7 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
-int64_t Assembler::target_at(int64_t pos, bool is_internal) {
+int Assembler::target_at(int pos, bool is_internal) {
if (is_internal) {
int64_t* p = reinterpret_cast<int64_t*>(buffer_ + pos);
int64_t address = *p;
@@ -643,7 +620,8 @@ int64_t Assembler::target_at(int64_t pos, bool is_internal) {
return kEndOfChain;
} else {
int64_t instr_address = reinterpret_cast<int64_t>(p);
- int64_t delta = instr_address - address;
+ DCHECK(instr_address - address < INT_MAX);
+ int delta = static_cast<int>(instr_address - address);
DCHECK(pos > delta);
return pos - delta;
}
@@ -689,7 +667,8 @@ int64_t Assembler::target_at(int64_t pos, bool is_internal) {
return kEndOfChain;
} else {
uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
- int64_t delta = instr_address - imm;
+ DCHECK(instr_address - imm < INT_MAX);
+ int delta = static_cast<int>(instr_address - imm);
DCHECK(pos > delta);
return pos - delta;
}
@@ -701,7 +680,7 @@ int64_t Assembler::target_at(int64_t pos, bool is_internal) {
} else {
uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
instr_address &= kImm28Mask;
- int64_t delta = instr_address - imm28;
+ int delta = static_cast<int>(instr_address - imm28);
DCHECK(pos > delta);
return pos - delta;
}
@@ -709,8 +688,7 @@ int64_t Assembler::target_at(int64_t pos, bool is_internal) {
}
-void Assembler::target_at_put(int64_t pos, int64_t target_pos,
- bool is_internal) {
+void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
if (is_internal) {
uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
*reinterpret_cast<uint64_t*>(buffer_ + pos) = imm;
@@ -796,7 +774,7 @@ void Assembler::print(Label* L) {
void Assembler::bind_to(Label* L, int pos) {
DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
- int32_t trampoline_pos = kInvalidSlotPos;
+ int trampoline_pos = kInvalidSlotPos;
bool is_internal = false;
if (L->is_linked() && !trampoline_emitted_) {
unbound_labels_count_--;
@@ -804,8 +782,8 @@ void Assembler::bind_to(Label* L, int pos) {
}
while (L->is_linked()) {
- int32_t fixup_pos = L->pos();
- int32_t dist = pos - fixup_pos;
+ int fixup_pos = L->pos();
+ int dist = pos - fixup_pos;
is_internal = internal_reference_positions_.find(fixup_pos) !=
internal_reference_positions_.end();
next(L, is_internal); // Call next before overwriting link with target at
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index 5ad98f6cd8..1ca1a8714f 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -41,8 +41,8 @@
#include <set>
#include "src/assembler.h"
+#include "src/compiler.h"
#include "src/mips64/constants-mips64.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -539,6 +539,11 @@ class Assembler : public AssemblerBase {
target);
}
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -554,14 +559,11 @@ class Assembler : public AssemblerBase {
static const int kSpecialTargetSize = 0;
// Number of consecutive instructions used to store 32bit/64bit constant.
- // Before jump-optimizations, this constant was used in
- // RelocInfo::target_address_address() function to tell serializer address of
- // the instruction that follows LUI/ORI instruction pair. Now, with new jump
- // optimization, where jump-through-register instruction that usually
- // follows LUI/ORI pair is substituted with J/JAL, this constant equals
- // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
- static const int kInstructionsFor32BitConstant = 3;
- static const int kInstructionsFor64BitConstant = 5;
+  // This constant was used in RelocInfo::target_address_address() to tell the
+  // serializer the address of the instruction that follows the LUI/ORI
+  // instruction pair.
+ static const int kInstructionsFor32BitConstant = 2;
+ static const int kInstructionsFor64BitConstant = 4;
// Distance between the instruction referring to the address of the call
// target and the return address.
@@ -584,6 +586,8 @@ class Assembler : public AssemblerBase {
// Number of instructions used for the JS return sequence. The constant is
// used by the debugger to patch the JS return sequence.
static const int kJSReturnSequenceInstructions = 7;
+ static const int kJSReturnSequenceLength =
+ kJSReturnSequenceInstructions * kInstrSize;
static const int kDebugBreakSlotInstructions = 6;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -1060,7 +1064,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
+ void RecordDeoptReason(const int reason, const SourcePosition position);
static int RelocateInternalReference(RelocInfo::Mode rmode, byte* pc,
intptr_t pc_delta);
@@ -1165,13 +1169,16 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
+ inline static void set_target_internal_reference_encoded_at(Address pc,
+ Address target);
+
int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
- int64_t target_at(int64_t pos, bool is_internal);
+ int target_at(int pos, bool is_internal);
// Patch branch instruction at pos to branch to given branch target pos.
- void target_at_put(int64_t pos, int64_t target_pos, bool is_internal);
+ void target_at_put(int pos, int target_pos, bool is_internal);
// Say if we need to relocate with this mode.
bool MustUseReg(RelocInfo::Mode rmode);
diff --git a/deps/v8/src/mips64/builtins-mips64.cc b/deps/v8/src/mips64/builtins-mips64.cc
index 89fda10b05..24d4a800e5 100644
--- a/deps/v8/src/mips64/builtins-mips64.cc
+++ b/deps/v8/src/mips64/builtins-mips64.cc
@@ -950,7 +950,9 @@ static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
// Push function as parameter to the runtime call.
__ Push(a1, a1);
// Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+ __ LoadRoot(
+ at, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ push(at);
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
@@ -1357,49 +1359,100 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+ const int calleeOffset) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+ // Make a2 the space we have left. The stack might already be overflowed
+ // here which will cause a2 to become negative.
+ __ dsubu(a2, sp, a2);
+ // Check if the arguments will overflow the stack.
+ __ SmiScale(a7, v0, kPointerSizeLog2);
+ __ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison.
+
+ // Out of stack space.
+ __ ld(a1, MemOperand(fp, calleeOffset));
+ __ Push(a1, v0);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+
+ __ bind(&okay);
+}
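In C++ terms, the check above compares signed headroom against the size of the argument area; a hedged sketch of the arithmetic (kPointerSize is 8 on mips64):

    // a2 = sp - real_stack_limit may already be negative if sp is below the
    // limit; the signed comparison then fails and we report overflow.
    bool WouldOverflowSketch(intptr_t sp, intptr_t real_stack_limit,
                             intptr_t argc) {
      intptr_t headroom = sp - real_stack_limit;
      return headroom <= argc * 8;  // overflow unless headroom > argc * 8
    }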
+
+
+static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int argumentsOffset,
+ const int indexOffset,
+ const int limitOffset) {
+ Label entry, loop;
+ __ ld(a0, MemOperand(fp, indexOffset));
+ __ Branch(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // a0: current argument index
+ __ bind(&loop);
+ __ ld(a1, MemOperand(fp, argumentsOffset));
+ __ Push(a1, a0);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(v0);
+
+ // Use inline caching to access the arguments.
+ __ ld(a0, MemOperand(fp, indexOffset));
+ __ Daddu(a0, a0, Operand(Smi::FromInt(1)));
+ __ sd(a0, MemOperand(fp, indexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ ld(a1, MemOperand(fp, limitOffset));
+ __ Branch(&loop, ne, a0, Operand(a1));
+
+  // On exit, the pushed arguments count is in a0, untagged.
+ __ SmiUntag(a0);
+}
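The loop above routes every element load through Runtime::kGetProperty, so getters and interceptors on the arguments object are honored. Stripped of that, its shape is just (generic sketch, not V8 API):

    // Push args[index] for index in [index, limit); the real code keeps index
    // and limit in stack slots because each iteration can call out to JS.
    template <typename Stack, typename Args>
    void PushAppliedArgumentsSketch(Stack& stack, const Args& args,
                                    int index, int limit) {
      for (; index != limit; ++index) stack.push_back(args[index]);
    }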
+
+
+// Used by FunctionApply and ReflectApply
+static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
+ const int kFormalParameters = targetIsArgument ? 3 : 2;
+ const int kStackSize = kFormalParameters + 1;
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ const int kFunctionOffset = kReceiverOffset + kPointerSize;
+
__ ld(a0, MemOperand(fp, kFunctionOffset)); // Get the function.
__ push(a0);
- __ ld(a0, MemOperand(fp, kArgsOffset)); // Get the args array.
+ __ ld(a0, MemOperand(fp, kArgumentsOffset)); // Get the args array.
__ push(a0);
+
// Returns (in v0) number of arguments to copy to stack as Smi.
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
- // Make a2 the space we have left. The stack might already be overflowed
- // here which will cause a2 to become negative.
- __ dsubu(a2, sp, a2);
- // Check if the arguments will overflow the stack.
- __ SmiScale(a7, v0, kPointerSizeLog2);
- __ Branch(&okay, gt, a2, Operand(a7)); // Signed comparison.
-
- // Out of stack space.
- __ ld(a1, MemOperand(fp, kFunctionOffset));
- __ Push(a1, v0);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
+ if (targetIsArgument) {
+ __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ } else {
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ }
+
+ // Returns the result in v0.
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current limit and index.
- __ bind(&okay);
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
__ mov(a1, zero_reg);
__ Push(v0, a1); // Limit and initial index.
// Get the receiver.
- __ ld(a0, MemOperand(fp, kRecvOffset));
+ __ ld(a0, MemOperand(fp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
@@ -1455,36 +1508,12 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(a0);
// Copy all arguments from the array to the stack.
- Label entry, loop;
- __ ld(a0, MemOperand(fp, kIndexOffset));
- __ Branch(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // a0: current argument index
- __ bind(&loop);
- __ ld(a1, MemOperand(fp, kArgsOffset));
- __ Push(a1, a0);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(v0);
-
- // Use inline caching to access the arguments.
- __ ld(a0, MemOperand(fp, kIndexOffset));
- __ Daddu(a0, a0, Operand(Smi::FromInt(1)));
- __ sd(a0, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ ld(a1, MemOperand(fp, kLimitOffset));
- __ Branch(&loop, ne, a0, Operand(a1));
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
ParameterCount actual(a0);
- __ SmiUntag(a0);
__ ld(a1, MemOperand(fp, kFunctionOffset));
__ GetObjectType(a1, a2, a2);
__ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -1493,7 +1522,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
frame_scope.GenerateLeaveFrame();
__ Ret(USE_DELAY_SLOT);
- __ Daddu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+ __ Daddu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
// Call the function proxy.
__ bind(&call_proxy);
@@ -1507,7 +1536,89 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
}
__ Ret(USE_DELAY_SLOT);
- __ Daddu(sp, sp, Operand(3 * kPointerSize)); // In delay slot.
+ __ Daddu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
+}
+
+
+static void Generate_ConstructHelper(MacroAssembler* masm) {
+ const int kFormalParameters = 3;
+ const int kStackSize = kFormalParameters + 1;
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
+ const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+
+    // If newTarget is not supplied, set it to the constructor.
+ Label validate_arguments;
+ __ ld(a0, MemOperand(fp, kNewTargetOffset));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&validate_arguments, ne, a0, Operand(at));
+ __ ld(a0, MemOperand(fp, kFunctionOffset));
+ __ sd(a0, MemOperand(fp, kNewTargetOffset));
+
+ // Validate arguments
+ __ bind(&validate_arguments);
+ __ ld(a0, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(a0);
+ __ ld(a0, MemOperand(fp, kArgumentsOffset)); // get the args array
+ __ push(a0);
+ __ ld(a0, MemOperand(fp, kNewTargetOffset)); // get the new.target
+ __ push(a0);
+ // Returns argument count in v0.
+ __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+
+ // Returns result in v0.
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
+
+ // Push current limit and index.
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ __ push(v0); // limit
+ __ mov(a1, zero_reg); // initial index
+ __ push(a1);
+ // Push newTarget and callee functions
+ __ ld(a0, MemOperand(fp, kNewTargetOffset));
+ __ push(a0);
+ __ ld(a0, MemOperand(fp, kFunctionOffset));
+ __ push(a0);
+
+ // Copy all arguments from the array to the stack.
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+
+ // Use undefined feedback vector
+ __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+ __ ld(a1, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ // Leave internal frame.
+ }
+ __ jr(ra);
+ __ Daddu(sp, sp, Operand(kStackSize * kPointerSize)); // In delay slot.
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, false);
+}
+
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, true);
+}
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ Generate_ConstructHelper(masm);
}
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 1a0b97221e..560e2c6e4e 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -11,6 +11,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -1116,13 +1117,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ LoadRoot(a4, Heap::kExceptionRootIndex);
__ Branch(&exception_returned, eq, a4, Operand(v0));
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
-
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
__ li(a2, Operand(pending_exception_address));
__ ld(a2, MemOperand(a2));
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
@@ -1142,25 +1142,52 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
- // Retrieve the pending exception.
- __ li(a2, Operand(pending_exception_address));
- __ ld(v0, MemOperand(a2));
-
- // Clear the pending exception.
- __ li(a3, Operand(isolate()->factory()->the_hole_value()));
- __ sd(a3, MemOperand(a2));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- Label throw_termination_exception;
- __ LoadRoot(a4, Heap::kTerminationExceptionRootIndex);
- __ Branch(&throw_termination_exception, eq, v0, Operand(a4));
-
- // Handle normal exception.
- __ Throw(v0);
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set v0 to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, a0);
+ __ mov(a0, zero_reg);
+ __ mov(a1, zero_reg);
+ __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(find_handler, 3);
+ }
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(v0);
+ // Retrieve the handler context, SP and FP.
+ __ li(cp, Operand(pending_handler_context_address));
+ __ ld(cp, MemOperand(cp));
+ __ li(sp, Operand(pending_handler_sp_address));
+ __ ld(sp, MemOperand(sp));
+ __ li(fp, Operand(pending_handler_fp_address));
+ __ ld(fp, MemOperand(fp));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (cp == 0) for non-JS frames.
+ Label zero;
+ __ Branch(&zero, eq, cp, Operand(zero_reg));
+ __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&zero);
+
+ // Compute the handler entry address and jump to it.
+ __ li(a1, Operand(pending_handler_code_address));
+ __ ld(a1, MemOperand(a1));
+ __ li(a2, Operand(pending_handler_offset_address));
+ __ ld(a2, MemOperand(a2));
+ __ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Daddu(t9, a1, a2);
+ __ Jump(t9);
}
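The last four instructions convert the (tagged) Code object plus the stored handler offset into an instruction address; in effect (sketch, using the constants assumed above):

    // entry = untagged code pointer + header size + handler offset.
    Address ComputeHandlerEntrySketch(Address tagged_code, intptr_t offset) {
      return tagged_code + (Code::kHeaderSize - kHeapObjectTag) + offset;
    }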
@@ -1253,7 +1280,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
@@ -1262,10 +1289,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the bal(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
@@ -1309,7 +1335,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Call(t9);
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ bind(&exit); // v0 holds result
// Check if the current stack frame is marked as the outermost JS frame.
@@ -1352,12 +1378,8 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
Register result = v0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!FLAG_vector_ics ||
- (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
- result.is(VectorLoadICDescriptor::SlotRegister())));
+ !scratch.is(VectorLoadICDescriptor::VectorRegister()));
- // StringCharAtGenerator doesn't use the result register until it's passed
- // the different miss possibilities. If it did, we would have a conflict
- // when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
@@ -1368,7 +1390,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1937,10 +1959,13 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ bind(&adaptor_frame);
__ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
+ Label skip_decrement;
+ __ Branch(&skip_decrement, eq, a1, Operand(Smi::FromInt(0)));
// Subtract 1 from smi-tagged arguments count.
__ SmiUntag(a1);
__ Daddu(a1, a1, Operand(-1));
__ SmiTag(a1);
+ __ bind(&skip_decrement);
}
__ sd(a1, MemOperand(sp, 0));
__ SmiScale(at, a1, kPointerSizeLog2);
@@ -2058,7 +2083,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2374,17 +2399,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ld(v0, MemOperand(a2, 0));
__ Branch(&runtime, eq, v0, Operand(a1));
- __ sd(a1, MemOperand(a2, 0)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
- Label termination_exception;
- __ Branch(&termination_exception, eq, v0, Operand(a0));
-
- __ Throw(v0);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(v0);
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ bind(&failure);
// For failure and exception return null.
@@ -2480,7 +2496,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -3070,7 +3086,7 @@ void CallICStub::GenerateMiss(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
+ MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@@ -3084,7 +3100,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
// Consumed by runtime conversion function:
- __ Push(object_, index_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ } else {
+ __ Push(object_, index_);
+ }
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@@ -3097,7 +3118,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
// have a chance to overwrite it.
__ Move(index_, v0);
- __ pop(object_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(VectorLoadICDescriptor::SlotRegister(),
+ VectorLoadICDescriptor::VectorRegister(), object_);
+ } else {
+ __ pop(object_);
+ }
// Reload the instance type.
__ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
@@ -3428,7 +3454,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// v0: original string
@@ -3621,7 +3647,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@@ -3931,7 +3957,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ bind(&miss);
@@ -4548,7 +4574,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements);
+ __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, &slow_elements);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0);
}
@@ -4572,15 +4598,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawLoadStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawKeyedLoadStub stub(isolate());
+ stub.GenerateForTrampoline(masm);
}
@@ -4598,6 +4624,243 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
+void VectorRawLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register feedback, Register scratch1,
+ Register scratch2, Register scratch3,
+ bool is_polymorphic, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ Register receiver_map = scratch1;
+ Register cached_map = scratch2;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&compare_map);
+ __ ld(cached_map,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+ __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Branch(&start_polymorphic, ne, receiver_map, Operand(cached_map));
+  // Found; now call the handler.
+ Register handler = feedback;
+ __ ld(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ Register length = scratch3;
+ __ bind(&start_polymorphic);
+ __ ld(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+ if (!is_polymorphic) {
+ // If the IC could be monomorphic we have to make sure we don't go past the
+ // end of the feedback array.
+ __ Branch(miss, eq, length, Operand(Smi::FromInt(2)));
+ }
+
+ Register too_far = length;
+ Register pointer_reg = feedback;
+
+  // +-----+------+------+-----+-----+ ... ----+
+  // | map | len  | wm0  | h0  | wm1 |   hN    |
+  // +-----+------+------+-----+-----+ ... ----+
+  //                 0      1     2        len-1
+  //                 ^                         ^
+  //                 |                         |
+  //             pointer_reg                too_far
+  //             aka feedback               scratch3
+ // also need receiver_map (aka scratch1)
+ // use cached_map (scratch2) to look in the weak map values.
+ __ SmiScale(too_far, length, kPointerSizeLog2);
+ __ Daddu(too_far, feedback, Operand(too_far));
+ __ Daddu(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ Daddu(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ ld(cached_map, MemOperand(pointer_reg));
+ __ ld(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ Branch(&prepare_next, ne, receiver_map, Operand(cached_map));
+ __ ld(handler, MemOperand(pointer_reg, kPointerSize));
+ __ Daddu(t9, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(t9);
+
+ __ bind(&prepare_next);
+ __ Daddu(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
+ __ Branch(&next_loop, lt, pointer_reg, Operand(too_far));
+
+ // We exhausted our array of map handler pairs.
+ __ Branch(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ Branch(&compare_map);
+}
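A compact way to read the probe loop: elements 2..len-1 of the feedback array interleave weak-cell maps with their handlers, and the loop scans the pairs until receiver_map matches. The same search over a plain pair array (generic sketch, not V8 types):

    struct MapHandlerPair { const void* map; const void* handler; };
    const void* ProbeSketch(const MapHandlerPair* pairs, int n,
                            const void* receiver_map) {
      for (int i = 0; i < n; ++i) {
        if (pairs[i].map == receiver_map) return pairs[i].handler;  // hit
      }
      return nullptr;  // miss: fall through to the IC miss path
    }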
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register weak_cell, Register scratch,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label compare_smi_map;
+ Register receiver_map = scratch;
+ Register cached_map = weak_cell;
+
+ // Move the weak map into the weak_cell register.
+ __ ld(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Branch(miss, ne, cached_map, Operand(receiver_map));
+
+ Register handler = weak_cell;
+ __ SmiScale(handler, slot, kPointerSizeLog2);
+ __ Daddu(handler, vector, Operand(handler));
+ __ ld(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(t9);
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ // TODO(mvstanton): does this hold on ARM?
+ __ bind(&compare_smi_map);
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ __ Branch(miss, ne, weak_cell, Operand(at));
+ __ SmiScale(handler, slot, kPointerSizeLog2);
+ __ Daddu(handler, vector, Operand(handler));
+ __ ld(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ Daddu(t9, handler, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(t9);
+}
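The SmiScale/Daddu/ld sequence above indexes the vector at slot + 1: the weak cell lives at the slot itself and the handler immediately after it. Reduced to array indexing (hypothetical layout, not V8 API):

    // vector_elements[slot]     == weak cell holding the expected map
    // vector_elements[slot + 1] == handler code to jump to
    template <typename T>
    T LoadHandlerSketch(const T* vector_elements, int slot) {
      return vector_elements[slot + 1];
    }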
+
+
+void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
+ Register name = VectorLoadICDescriptor::NameRegister(); // a2
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
+ Register feedback = a4;
+ Register scratch1 = a5;
+
+ __ SmiScale(feedback, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(feedback));
+ __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
+ __ Branch(&try_array, ne, scratch1, Operand(at));
+ HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
+ &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&not_array, ne, scratch1, Operand(at));
+ HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, a6,
+ a7, true, &miss);
+
+ __ bind(&not_array);
+ __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+ __ Branch(&miss, ne, feedback, Operand(at));
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ false, receiver, name, feedback,
+ scratch1, a6, a7);
+
+ __ bind(&miss);
+ LoadIC::GenerateMiss(masm);
+}
+
+
+void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // a1
+ Register key = VectorLoadICDescriptor::NameRegister(); // a2
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // a3
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // a0
+ Register feedback = a4;
+ Register scratch1 = a5;
+
+ __ SmiScale(feedback, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(feedback));
+ __ ld(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ ld(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kWeakCellMapRootIndex);
+ __ Branch(&try_array, ne, scratch1, Operand(at));
+ __ JumpIfNotSmi(key, &miss);
+ HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
+ &miss);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&not_array, ne, scratch1, Operand(at));
+ // We have a polymorphic element handler.
+ __ JumpIfNotSmi(key, &miss);
+
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, a6,
+ a7, true, &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+ __ Branch(&try_poly_name, ne, feedback, Operand(at));
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ Branch(&miss, ne, key, Operand(feedback));
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ SmiScale(feedback, slot, kPointerSizeLog2);
+ __ Daddu(feedback, vector, Operand(feedback));
+ __ ld(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, a6,
+ a7, false, &miss);
+
+ __ bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
@@ -5056,7 +5319,6 @@ static void CallApiFunctionAndReturn(
}
Label promote_scheduled_exception;
- Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
@@ -5077,13 +5339,8 @@ static void CallApiFunctionAndReturn(
__ ld(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
- // Check if the function scheduled an exception.
+ // Leave the API exit frame.
__ bind(&leave_exit_frame);
- __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
- __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
- __ ld(a5, MemOperand(at));
- __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
- __ bind(&exception_handled);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
@@ -5095,15 +5352,20 @@ static void CallApiFunctionAndReturn(
} else {
__ li(s0, Operand(stack_space));
}
- __ LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN,
+ __ LeaveExitFrame(false, s0, !restore_context, NO_EMIT_RETURN,
stack_space_offset != kInvalidStackOffset);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+ __ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ ld(a5, MemOperand(at));
+ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
- }
- __ jmp(&exception_handled);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/mips64/debug-mips64.cc b/deps/v8/src/mips64/debug-mips64.cc
index 0bb0c4a802..8ef247d8c9 100644
--- a/deps/v8/src/mips64/debug-mips64.cc
+++ b/deps/v8/src/mips64/debug-mips64.cc
@@ -14,12 +14,7 @@
namespace v8 {
namespace internal {
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
+void BreakLocation::SetDebugBreakAtReturn() {
// Mips return sequence:
// mov sp, fp
// lw fp, sp(0)
@@ -31,7 +26,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
  // Make sure this constant matches the number of instructions we emit.
DCHECK(Assembler::kJSReturnSequenceInstructions == 7);
- CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
// li and Call pseudo-instructions emit 6 + 2 instructions.
patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int64_t>(
debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())),
@@ -44,29 +39,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
}
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceInstructions);
-}
-
-
-// A debug break in the exit code is identified by the JS frame exit code
-// having been patched with li/call pseudo-instructions (lui/ori/jalr).
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
+void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from:
// nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
@@ -78,7 +51,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// to a call to the debug break slot code.
// li t9, address (4-instruction sequence on mips64)
// call t9 (jalr t9 / nop instruction pair)
- CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
patcher.masm()->li(v8::internal::t9,
Operand(reinterpret_cast<int64_t>(
debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())),
@@ -87,13 +60,6 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
}
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index e77faedd3b..2b55695af8 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -131,7 +131,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Unlike on ARM we don't save all the registers, just the useful ones.
@@ -329,39 +329,66 @@ void Deoptimizer::EntryGenerator::Generate() {
// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 11 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label table_start;
+ Label table_start, done, done_special, trampoline_jump;
__ bind(&table_start);
- for (int i = 0; i < count(); i++) {
- Label start;
- __ bind(&start);
- __ daddiu(sp, sp, -1 * kPointerSize);
- // Jump over the remaining deopt entries (including this one).
- // This code is always reached by calling Jump, which puts the target (label
- // start) into t9.
- const int remaining_entries = (count() - i) * table_entry_size_;
- __ Daddu(t9, t9, remaining_entries);
- // 'at' was clobbered so we can only load the current entry value here.
- __ li(t8, i);
- __ jr(t9); // Expose delay slot.
- __ sd(t8, MemOperand(sp, 0 * kPointerSize)); // In the delay slot.
-
- // Pad the rest of the code.
- while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
- __ nop();
+ int kMaxEntriesBranchReach =
+ (1 << (kImm16Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
+
+ if (count() <= kMaxEntriesBranchReach) {
+ // Common case.
+ for (int i = 0; i < count(); i++) {
+ Label start;
+ __ bind(&start);
+ DCHECK(is_int16(i));
+ __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+
+ DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
- DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
- }
+ DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+ count() * table_entry_size_);
+ __ bind(&done);
+ __ Push(at);
+ } else {
+    // Uncommon case: the branch cannot reach all entries.
+    // Create a mini trampoline and adjust the id constants so that the
+    // proper value is recovered at the end of the table.
+ for (int i = kMaxEntriesBranchReach; i > 1; i--) {
+ Label start;
+ __ bind(&start);
+ DCHECK(is_int16(i));
+ __ Branch(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
+ __ li(at, -i); // In the delay slot.
+ DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+ }
+ // Entry with id == kMaxEntriesBranchReach - 1.
+ __ bind(&trampoline_jump);
+ __ Branch(USE_DELAY_SLOT, &done_special);
+ __ li(at, -1);
+
+ for (int i = kMaxEntriesBranchReach; i < count(); i++) {
+ Label start;
+ __ bind(&start);
+ DCHECK(is_int16(i));
+ __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+ }
- DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
- count() * table_entry_size_);
+ DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+ count() * table_entry_size_);
+ __ bind(&done_special);
+ __ daddiu(at, at, kMaxEntriesBranchReach);
+ __ bind(&done);
+ __ Push(at);
+ }
}
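The reach arithmetic above is easy to sanity-check off-target. A minimal standalone sketch, assuming MIPS64's fixed 4-byte instructions and the same conservative use of the signed 16-bit branch-offset field as the hunk:

    #include <cstdio>

    int main() {
      const int kInstrSize = 4;                     // MIPS64 fixed-width encoding
      const int table_entry_size = 2 * kInstrSize;  // branch + li in the delay slot
      const int kImm16Bits = 16;                    // signed 16-bit branch offset
      // Same computation as GeneratePrologue above:
      const int kMaxEntriesBranchReach =
          (1 << (kImm16Bits - 2)) / (table_entry_size / kInstrSize);
      printf("%d\n", kMaxEntriesBranchReach);  // 8192; larger tables trampoline
      return 0;
    }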
diff --git a/deps/v8/src/mips64/frames-mips64.h b/deps/v8/src/mips64/frames-mips64.h
index eaf29c89bb..be732ef5f7 100644
--- a/deps/v8/src/mips64/frames-mips64.h
+++ b/deps/v8/src/mips64/frames-mips64.h
@@ -205,11 +205,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-inline void StackHandler::SetFp(Address slot, Address fp) {
- Memory::Address_at(slot) = fp;
-}
-
-
} } // namespace v8::internal
#endif
diff --git a/deps/v8/src/mips64/full-codegen-mips64.cc b/deps/v8/src/mips64/full-codegen-mips64.cc
index 2d8fc155ed..91d374a2c0 100644
--- a/deps/v8/src/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/mips64/full-codegen-mips64.cc
@@ -115,7 +115,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
@@ -201,7 +202,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
// Argument to NewContext is the function, which is still in a1.
bool need_write_barrier = true;
- if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ if (info->scope()->is_script_scope()) {
__ push(a1);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
@@ -246,6 +247,11 @@ void FullCodeGenerator::Generate() {
}
}
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@@ -254,6 +260,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
+ if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
+ --num_parameters;
+ ++rest_index;
+ }
+
__ Daddu(a3, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
__ li(a2, Operand(Smi::FromInt(num_parameters)));
@@ -288,10 +299,6 @@ void FullCodeGenerator::Generate() {
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
@@ -1511,7 +1518,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ li(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
- CallLoadIC(CONTEXTUAL);
+ CallGlobalLoadIC(var->name());
context()->Plug(v0);
break;
}
@@ -2158,7 +2165,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
__ mov(a0, v0);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw"
__ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter
__ Push(a2, a3, a0); // "throw", iter, except
@@ -2169,17 +2175,18 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(a0); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
+ EnterTryBlock(expr->index(), &l_catch);
+ const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(a0); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ mov(a0, v0);
__ jmp(&l_resume);
__ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
+ const int generator_object_depth = kPointerSize + try_block_size;
__ ld(a0, MemOperand(sp, generator_object_depth));
__ push(a0); // g
+ __ Push(Smi::FromInt(expr->index())); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
__ sd(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
@@ -2187,13 +2194,13 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(a1, cp);
__ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
kRAHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(v0); // result
EmitReturnSequence();
__ mov(a0, v0);
__ bind(&l_resume); // received in a0
- __ PopTryHandler();
+ ExitTryBlock(expr->index());
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2542,6 +2549,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
__ push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+ // The static prototype property is read only. We handle the non-computed
+ // property name case in the parser. Since this is the only case where we
+ // need to check for an own read-only property, we special-case it so we
+ // do not need to do the check for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ push(v0);
+ }
+
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@@ -2686,23 +2703,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ li(StoreDescriptor::NameRegister(), Operand(var->name()));
__ ld(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ li(a0, Operand(var->name()));
- __ Push(v0, cp, a0); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, a1);
- __ ld(a2, location);
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(&skip, ne, a2, Operand(at));
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
@@ -2719,6 +2719,22 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
// Perform the assignment.
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, a1);
+ __ ld(a3, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&const_error, ne, a3, Operand(at));
+ __ li(a3, Operand(var->name()));
+ __ push(a3);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2743,8 +2759,31 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ li(a0, Operand(var->name()));
+ __ Push(v0, cp, a0); // Context and name.
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, a1);
+ __ ld(a2, location);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&skip, ne, a2, Operand(at));
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ }
+ // Silently ignore store in sloppy mode.
}
}
@@ -2878,7 +2917,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ Push(isolate()->factory()->undefined_value());
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ push(at);
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -3229,7 +3269,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- if (!ValidateSuperCall(expr)) return;
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
@@ -3740,9 +3779,10 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ ld(v0, FieldMemOperand(v0, Map::kConstructorOffset));
- __ GetObjectType(v0, a1, a1);
- __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+ Register instance_type = a2;
+ __ GetMapConstructor(v0, v0, a1, instance_type);
+ __ Branch(&non_function_constructor, ne, instance_type,
+ Operand(JS_FUNCTION_TYPE));
// v0 now contains the constructor function. Grab the
// instance class name from there.
@@ -4054,7 +4094,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4103,7 +4143,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4289,7 +4329,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ bind(&done);
context()->Plug(v0);
@@ -4575,18 +4615,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- if (expr->function() != NULL &&
- expr->function()->intrinsic_type == Runtime::INLINE) {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
__ ld(receiver, GlobalObjectOperand());
@@ -4609,7 +4642,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ sd(v0, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
- int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@@ -4625,14 +4657,27 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
context()->DropAndPlug(1, v0);
} else {
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(v0);
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(v0);
+ }
+ }
}
}
@@ -5275,19 +5320,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ li(at, Operand(pending_message_obj));
__ ld(a1, MemOperand(at));
__ push(a1);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ li(at, Operand(has_pending_message));
- __ ld(a1, MemOperand(at));
- __ SmiTag(a1);
- __ push(a1);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ li(at, Operand(pending_message_script));
- __ ld(a1, MemOperand(at));
- __ push(a1);
}
@@ -5295,19 +5327,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(a1));
// Restore pending message from stack.
__ pop(a1);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ li(at, Operand(pending_message_script));
- __ sd(a1, MemOperand(at));
-
- __ pop(a1);
- __ SmiUntag(a1);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ li(at, Operand(has_pending_message));
- __ sd(a1, MemOperand(at));
-
- __ pop(a1);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ li(at, Operand(pending_message_obj));
@@ -5327,34 +5346,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ ld(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ Call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc,
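The rewritten VisitCallRuntime dispatches inlinable intrinsics through an X-macro list: each name in FOR_EACH_FULL_CODE_INTRINSIC becomes a switch case forwarding to its Emit##Name generator, and everything else falls back to a plain runtime call. A self-contained sketch of the pattern with invented stand-in intrinsics, not the real V8 list:

    #include <cstdio>

    #define FOR_EACH_FULL_CODE_INTRINSIC(V) \
      V(IsSmi)                              \
      V(StringCharCodeAt)

    enum FunctionId {
    #define DECLARE_ID(Name) kInline##Name,
      FOR_EACH_FULL_CODE_INTRINSIC(DECLARE_ID)
    #undef DECLARE_ID
      kOther
    };

    void EmitIsSmi() { puts("inline IsSmi"); }
    void EmitStringCharCodeAt() { puts("inline StringCharCodeAt"); }

    void VisitCallRuntime(FunctionId id) {
      switch (id) {
    #define CALL_INTRINSIC_GENERATOR(Name) \
      case kInline##Name:                  \
        return Emit##Name();
        FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
    #undef CALL_INTRINSIC_GENERATOR
        default:
          puts("generic runtime call");  // push args, __ CallRuntime(...)
      }
    }

    int main() {
      VisitCallRuntime(kInlineIsSmi);
      VisitCallRuntime(kOther);
      return 0;
    }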
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 8d1b9f29e0..b9003af684 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -227,6 +227,12 @@ void InternalArrayConstructorDescriptor::Initialize(
}
+void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, a1, a0};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, a0};
data->Initialize(arraysize(registers), registers, NULL);
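The new CompareDescriptor follows the same shape as every other descriptor in this file: a fixed register list, context register first, handed to the shared CallInterfaceDescriptorData. A toy model of that registration, with stand-in types for illustration:

    #include <cstdio>

    struct Register { const char* name; };

    struct CallInterfaceDescriptorData {
      void Initialize(int count, const Register* regs, const void* representations) {
        count_ = count;
        regs_ = regs;
        reps_ = representations;  // NULL in the mips64 ports above
      }
      int count_ = 0;
      const Register* regs_ = nullptr;
      const void* reps_ = nullptr;
    };

    int main() {
      // Mirrors: Register registers[] = {cp, a1, a0};
      Register registers[] = {{"cp"}, {"a1"}, {"a0"}};
      CallInterfaceDescriptorData data;
      data.Initialize(3, registers, nullptr);
      printf("%d registers, context first: %s\n", data.count_, data.regs_[0].name);
      return 0;
    }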
diff --git a/deps/v8/src/mips64/lithium-codegen-mips64.cc b/deps/v8/src/mips64/lithium-codegen-mips64.cc
index ae2e792f42..3b19379e80 100644
--- a/deps/v8/src/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/mips64/lithium-codegen-mips64.cc
@@ -6,6 +6,7 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -117,7 +118,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
+ if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -302,7 +303,7 @@ bool LCodeGen::GenerateJumpTable() {
Comment(";;; -------------------- Jump table --------------------");
}
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Label table_start;
+ Label table_start, call_deopt_entry;
__ bind(&table_start);
Label needs_frame;
for (int i = 0; i < jump_table_.length(); i++) {
@@ -313,29 +314,35 @@ bool LCodeGen::GenerateJumpTable() {
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
- if (needs_frame.is_bound()) {
- __ Branch(&needs_frame);
- } else {
- __ bind(&needs_frame);
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Daddu(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ Call(t9);
- }
+ Comment(";;; call deopt with frame");
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ __ Call(&needs_frame);
} else {
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
- __ Call(t9);
+ __ Call(&call_deopt_entry);
}
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
+ }
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(at);
+ __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}
+
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
+
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
+ }
+ __ Jump(t9);
+
__ RecordComment("]");
// The deoptimization jump table is the last part of the instruction
@@ -806,8 +813,8 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
__ bind(&skip);
}
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
@@ -815,6 +822,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
@@ -2362,9 +2370,8 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
Register scratch = scratch0();
__ FmoveHigh(scratch, input_reg);
- __ dsll32(scratch, scratch, 0); // FmoveHigh (mfhc1) sign-extends.
- __ dsrl32(scratch, scratch, 0); // Use only low 32-bits.
- EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
+ EmitBranch(instr, eq, scratch,
+ Operand(static_cast<int32_t>(kHoleNanUpper32)));
}
@@ -2617,14 +2624,15 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ ld(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+ Register instance_type = scratch1();
+ DCHECK(!instance_type.is(temp));
+ __ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
- __ GetObjectType(temp, temp2, temp2);
if (String::Equals(class_name, isolate()->factory()->Object_string())) {
- __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
} else {
- __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+ __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
}
// temp now contains the constructor function. Grab the
@@ -2730,8 +2738,8 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// root array to force relocation to be able to later patch with
// the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ li(at, Operand(Handle<Object>(cell)));
- __ ld(at, FieldMemOperand(at, PropertyCell::kValueOffset));
+ __ li(at, Operand(cell));
+ __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
__ BranchShort(&cache_miss, ne, map, Operand(at));
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
@@ -2871,17 +2879,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
- __ ld(result, FieldMemOperand(at, Cell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
- }
-}
-
-
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@@ -2911,36 +2908,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = scratch0();
-
- // Load the cell.
- __ li(cell, Operand(instr->hydrogen()->cell().handle()));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We use a temp to check the payload.
- Register payload = ToRegister(instr->temp());
- __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole, payload, Operand(at));
- }
-
- // Store the value.
- __ sd(value, FieldMemOperand(cell, Cell::kValueOffset));
- // Cells are always rescanned, so no write barrier here.
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3052,8 +3025,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL,
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3275,9 +3249,9 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ ldc1(result, MemOperand(scratch));
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lwu(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+ __ FmoveHigh(scratch, result);
DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
- Operand(kHoleNanUpper32));
+ Operand(static_cast<int32_t>(kHoleNanUpper32)));
}
}
@@ -3404,7 +3378,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4312,7 +4288,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
+ Handle<Code> ic =
+ StoreIC::initialize_stub(isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4473,22 +4451,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
}
if (instr->NeedsCanonicalization()) {
- Label is_nan;
- // Check for NaN. All NaNs must be canonicalized.
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ bind(&is_nan);
- __ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ FPUCanonicalizeNaN(double_scratch, value);
__ sdc1(double_scratch, MemOperand(scratch, 0));
- __ Branch(&done);
+ } else {
+ __ sdc1(value, MemOperand(scratch, 0));
}
-
- __ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, 0));
- __ bind(&done);
}
@@ -4576,8 +4543,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5281,7 +5249,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ li(at, Operand(Handle<Object>(cell)));
+ __ li(at, Operand(cell));
__ ld(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
} else {
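Two of the hunks above (DoCmpHoleAndBranch and DoLoadKeyedFixedDoubleArray) replace an explicit dsll32/dsrl32 zero-extension dance with a sign-extended comparison operand. The reason: FmoveHigh compiles to mfhc1, which sign-extends the high word into a 64-bit register, so the immediate must be sign-extended the same way. A host-side sketch of the mismatch, using a hypothetical value for the hole-NaN upper word:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical upper word of the hole NaN; only its set sign bit matters.
      const uint32_t kHoleNanUpper32 = 0xFFF7FFFFu;

      // What mfhc1 leaves in a GPR: the high word, sign-extended to 64 bits.
      int64_t gpr = static_cast<int32_t>(kHoleNanUpper32);

      // Zero-extended comparison operand: no match.
      printf("%d\n", gpr == static_cast<int64_t>(kHoleNanUpper32));  // 0
      // Sign-extended operand, as the patched code emits: match.
      printf("%d\n",
             gpr == static_cast<int64_t>(static_cast<int32_t>(kHoleNanUpper32)));  // 1
      return 0;
    }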
diff --git a/deps/v8/src/mips64/lithium-mips64.cc b/deps/v8/src/mips64/lithium-mips64.cc
index 1e48881a06..60a837e7c5 100644
--- a/deps/v8/src/mips64/lithium-mips64.cc
+++ b/deps/v8/src/mips64/lithium-mips64.cc
@@ -2085,14 +2085,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object =
@@ -2107,16 +2099,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to check the value in the cell in the case where we perform
- // a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
diff --git a/deps/v8/src/mips64/lithium-mips64.h b/deps/v8/src/mips64/lithium-mips64.h
index afc84efb8f..6eed2c2391 100644
--- a/deps/v8/src/mips64/lithium-mips64.h
+++ b/deps/v8/src/mips64/lithium-mips64.h
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@@ -140,7 +139,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1665,13 +1663,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@@ -1693,21 +1684,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index 4a2261b868..1b8e4c091c 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -89,6 +89,7 @@ void MacroAssembler::LoadRoot(Register destination,
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -97,6 +98,7 @@ void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Branch(2, NegateCondition(cond), src1, src2);
sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -1717,6 +1719,14 @@ void MacroAssembler::BranchF(Label* target,
}
+void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) {
+ DCHECK(!src_low.is(at));
+ mfhc1(at, dst);
+ mtc1(src_low, dst);
+ mthc1(at, dst);
+}
+
+
void MacroAssembler::Move(FPURegister dst, float imm) {
li(at, Operand(bit_cast<int32_t>(imm)));
mtc1(at, dst);
@@ -2112,7 +2122,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
// Unsigned comparison.
case Ugreater:
if (r2.is(zero_reg)) {
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
bne(scratch, zero_reg, offset);
@@ -2120,7 +2130,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Ugreater_equal:
if (r2.is(zero_reg)) {
- bgez(rs, offset);
+ b(offset);
} else {
sltu(scratch, rs, r2);
beq(scratch, zero_reg, offset);
@@ -2137,7 +2147,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Uless_equal:
if (r2.is(zero_reg)) {
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
beq(scratch, zero_reg, offset);
@@ -2227,7 +2237,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
// Unsigned comparison.
case Ugreater:
if (rt.imm64_ == 0) {
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
@@ -2237,7 +2247,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Ugreater_equal:
if (rt.imm64_ == 0) {
- bgez(rs, offset);
+ b(offset);
} else if (is_int16(rt.imm64_)) {
sltiu(scratch, rs, rt.imm64_);
beq(scratch, zero_reg, offset);
@@ -2264,7 +2274,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
break;
case Uless_equal:
if (rt.imm64_ == 0) {
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
r2 = scratch;
li(r2, rt);
@@ -2366,7 +2376,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- bgtz(rs, offset);
+ bne(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
@@ -2376,7 +2386,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ b(offset);
} else {
sltu(scratch, rs, r2);
offset = shifted_branch_offset(L, false);
@@ -2396,7 +2406,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Uless_equal:
if (r2.is(zero_reg)) {
offset = shifted_branch_offset(L, false);
- b(offset);
+ beq(rs, zero_reg, offset);
} else {
sltu(scratch, r2, rs);
offset = shifted_branch_offset(L, false);
@@ -2518,7 +2528,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
case Ugreater_equal:
if (rt.imm64_ == 0) {
offset = shifted_branch_offset(L, false);
- bgez(rs, offset);
+ b(offset);
} else if (is_int16(rt.imm64_)) {
sltiu(scratch, rs, rt.imm64_);
offset = shifted_branch_offset(L, false);
@@ -3212,150 +3222,31 @@ void MacroAssembler::DebugBreak() {
// ---------------------------------------------------------------------------
// Exception handling.
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
+void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // For the JSEntry handler, we must preserve a0-a3 and s0.
- // a5-a7 are available. We will build up the handler from the bottom by
- // pushing on the stack.
- // Set up the code object (a5) and the state (a6) for pushing.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- li(a5, Operand(CodeObject()), CONSTANT_SIZE);
- li(a6, Operand(state));
-
- // Push the frame pointer, context, state, and code object.
- if (kind == StackHandler::JS_ENTRY) {
- DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
- // The second zero_reg indicates no context.
- // The first zero_reg is the NULL frame pointer.
- // The operands are reversed to match the order of MultiPush/Pop.
- Push(zero_reg, zero_reg, a6, a5);
- } else {
- MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit());
- }
// Link the current handler as the next handler.
li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
ld(a5, MemOperand(a6));
push(a5);
+
// Set this new handler as the current one.
sd(sp, MemOperand(a6));
}
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
pop(a1);
- Daddu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
+ kPointerSize)));
li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
sd(a1, MemOperand(at));
}
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // v0 = exception, a1 = code object, a2 = state.
- ld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
- Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- dsrl(a2, a2, StackHandler::kKindWidth); // Handler index.
- dsll(a2, a2, kPointerSizeLog2);
- Daddu(a2, a3, a2);
- ld(a2, MemOperand(a2)); // Smi-tagged offset.
- Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- dsra32(t9, a2, 0);
- Daddu(t9, t9, a1);
- Jump(t9); // Jump.
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in v0.
- Move(v0, value);
-
- // Drop the stack pointer to the top of the top handler.
- li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
- isolate())));
- ld(sp, MemOperand(a3));
-
- // Restore the next handler.
- pop(a2);
- sd(a2, MemOperand(a3));
-
- // Get the code object (a1) and state (a2). Restore the context and frame
- // pointer.
- MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
- // or cp.
- Label done;
- Branch(&done, eq, cp, Operand(zero_reg));
- sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- bind(&done);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in v0.
- if (!value.is(v0)) {
- mov(v0, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ld(sp, MemOperand(a3));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind);
- bind(&fetch_next);
- ld(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- ld(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
- And(a2, a2, Operand(StackHandler::KindField::kMask));
- Branch(&fetch_next, ne, a2, Operand(zero_reg));
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(a2);
- sd(a2, MemOperand(a3));
-
- // Get the code object (a1) and state (a2). Clear the context and frame
- // pointer (0 was saved in the handler).
- MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
-
- JumpToHandlerEntry();
-}
-
-
void MacroAssembler::Allocate(int object_size,
Register result,
Register scratch1,
@@ -3865,69 +3756,42 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
- Register scratch3,
Label* fail,
int elements_offset) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
- Register mantissa_reg = scratch2;
- Register exponent_reg = scratch3;
+ Label smi_value, done;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
- // Ensure that the object is a heap number
+ // Ensure that the object is a heap number.
CheckMap(value_reg,
scratch1,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- li(scratch1, Operand(kHoleNanUpper32 & HeapNumber::kExponentMask));
- lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
-
- lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- // dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
- dsra(scratch1, key_reg, 32 - kDoubleSizeLog2);
- Daddu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
- sw(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN,
- // otherwise it's Infinity or -Infinity, and the non-NaN code path applies.
- lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- LoadRoot(at, Heap::kNanValueRootIndex);
- lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
- lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
- jmp(&have_double_value);
+ // Double value: turn a potential sNaN into a qNaN.
+ DoubleRegister double_result = f0;
+ DoubleRegister double_scratch = f2;
+
+ ldc1(double_result, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ Branch(USE_DELAY_SLOT, &done); // Canonicalization is one instruction.
+ FPUCanonicalizeNaN(double_result, double_result);
bind(&smi_value);
+ // scratch1 is now effective address of the double element.
+ // Untag and transfer.
+ dsrl32(at, value_reg, 0);
+ mtc1(at, double_scratch);
+ cvt_d_w(double_result, double_scratch);
+
+ bind(&done);
Daddu(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
elements_offset));
- // dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
Daddu(scratch1, scratch1, scratch2);
- // scratch1 is now effective address of the double element
-
- Register untagged_value = elements_reg;
- SmiUntag(untagged_value, value_reg);
- mtc1(untagged_value, f2);
- cvt_d_w(f0, f2);
- sdc1(f0, MemOperand(scratch1, 0));
- bind(&done);
+ sdc1(double_result, MemOperand(scratch1, 0));
}
@@ -3999,6 +3863,10 @@ void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
}
+void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+ const DoubleRegister src) {
+ sub_d(dst, src, kDoubleRegZero);
+}
void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
Label* miss) {
@@ -4267,6 +4135,20 @@ void MacroAssembler::IsObjectNameType(Register object,
// Support functions.
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp, Register temp2) {
+ Label done, loop;
+ ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
+ bind(&loop);
+ JumpIfSmi(result, &done);
+ GetObjectType(result, temp, temp2);
+ Branch(&done, ne, temp2, Operand(MAP_TYPE));
+ ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
+ Branch(&loop);
+ bind(&done);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -4319,7 +4201,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- ld(result, FieldMemOperand(result, Map::kConstructorOffset));
+ GetMapConstructor(result, result, scratch, scratch);
}
// All done.
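The new FPUCanonicalizeNaN, used by both the keyed-store path and the rewritten StoreNumberToDoubleElements, quiets a possible signaling NaN with a single sub_d against kDoubleRegZero. A host-side sketch of why one subtraction is enough (NaN payload details are architecture-specific; this only checks the observable behavior):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    int main() {
      // Any FPU arithmetic on a signaling NaN yields a quiet NaN, and
      // x - 0.0 is the identity for every non-NaN x (including -0.0 under
      // round-to-nearest), so dst = src - 0.0 canonicalizes in one step.
      double snan = std::numeric_limits<double>::signaling_NaN();
      printf("NaN stays NaN: %d\n", std::isnan(snan - 0.0));
      printf("non-NaN unchanged: %g\n", 1.5 - 0.0);
      return 0;
    }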
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 1e25b334c7..6088b6164e 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -262,10 +262,16 @@ class MacroAssembler: public Assembler {
mfhc1(dst_high, src);
}
+ inline void FmoveHigh(FPURegister dst, Register src_high) {
+ mthc1(src_high, dst);
+ }
+
inline void FmoveLow(Register dst_low, FPURegister src) {
mfc1(dst_low, src);
}
+ void FmoveLow(FPURegister dst, Register src_low);
+
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
mthc1(src_high, dst);
@@ -993,19 +999,12 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// Exception handling.
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
+ // Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
- void PopTryHandler();
-
- // Passes thrown value to the handler of top of the try handler chain.
- void Throw(Register value);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain.
- void ThrowUncatchable(Register value);
+ void PopStackHandler();
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
@@ -1028,6 +1027,11 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// Support functions.
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done, and |temp2| its instance type.
+ void GetMapConstructor(Register result, Register map, Register temp,
+ Register temp2);
+
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
@@ -1069,7 +1073,6 @@ class MacroAssembler: public Assembler {
Register elements_reg,
Register scratch1,
Register scratch2,
- Register scratch3,
Label* fail,
int elements_offset = 0);
@@ -1116,6 +1119,10 @@ class MacroAssembler: public Assembler {
Handle<WeakCell> cell, Handle<Code> success,
SmiCheckType smi_check_type);
+ // If the value is a NaN, canonicalize the value; otherwise do nothing.
+ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
+
// Get value of the weak cell.
void GetWeakValue(Register value, Handle<WeakCell> cell);
@@ -1715,10 +1722,6 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
Register bitmap_reg,
Register mask_reg);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
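With Throw and ThrowUncatchable gone, unwinding is handled elsewhere in the runtime, and a stack handler shrinks from five words to a single next link headed by the isolate's kHandlerAddress slot. A toy model of the remaining Push/PopStackHandler pair (plain pointers standing in for the machine stack):

    #include <cstdio>

    struct StackHandler { StackHandler* next; };
    struct Isolate { StackHandler* handler_head = nullptr; };

    void PushStackHandler(Isolate* iso, StackHandler* h) {
      h->next = iso->handler_head;  // ld a5, [kHandlerAddress]; push a5
      iso->handler_head = h;        // sd sp, [kHandlerAddress]
    }

    void PopStackHandler(Isolate* iso) {
      iso->handler_head = iso->handler_head->next;  // pop a1; sd a1, [kHandlerAddress]
    }

    int main() {
      Isolate iso;
      StackHandler outer{}, inner{};
      PushStackHandler(&iso, &outer);
      PushStackHandler(&iso, &inner);
      PopStackHandler(&iso);
      printf("head is outer: %d\n", iso.handler_head == &outer);
      return 0;
    }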
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index bb39b97cca..9ca57f6948 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -1995,7 +1995,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
*return_addr_reg = instr->RdValue();
break;
case SLL:
- *alu_out = (int32_t)rt << sa;
+ *alu_out = static_cast<int32_t>(rt) << sa;
break;
case DSLL:
*alu_out = rt << sa;
@@ -2007,12 +2007,14 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
if (rs_reg == 0) {
// Regular logical right shift of a word by a fixed number of
// bits instruction. RS field is always equal to 0.
- *alu_out = (uint32_t)rt_u >> sa;
+ // Sign-extend the 32-bit result.
+ *alu_out = static_cast<int32_t>(static_cast<uint32_t>(rt_u) >> sa);
} else {
// Logical right-rotate of a word by a fixed number of bits. This
// is a special case of the SRL instruction, added in MIPS32 Release 2.
// RS field is equal to 00001.
- *alu_out = ((uint32_t)rt_u >> sa) | ((uint32_t)rt_u << (32 - sa));
+ *alu_out = static_cast<int32_t>(
+ base::bits::RotateRight32((uint32_t)rt_u, sa));
}
break;
case DSRL:
@@ -2040,13 +2042,13 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
if (sa == 0) {
// Regular logical right-shift of a word by a variable number of
// bits instruction. SA field is always equal to 0.
- *alu_out = (uint32_t)rt_u >> rs;
+ *alu_out = static_cast<int32_t>((uint32_t)rt_u >> rs);
} else {
// Logical right-rotate of a word by a variable number of bits.
// This is a special case of the SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
- *alu_out =
- ((uint32_t)rt_u >> rs_u) | ((uint32_t)rt_u << (32 - rs_u));
+ *alu_out = static_cast<int32_t>(
+ base::bits::RotateRight32((uint32_t)rt_u, rs_u));
}
break;
case DSRLV:
@@ -2058,7 +2060,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
// Logical right-rotate of a word by a variable number of bits.
// This is special case od SRLV instruction, added in MIPS32
// Release 2. SA field is equal to 00001.
- *alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ *alu_out = base::bits::RotateRight32(rt_u, rs_u);
}
break;
case SRAV:
@@ -2974,7 +2976,7 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
alu_out = (rs < se_imm16) ? 1 : 0;
break;
case SLTIU:
- alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+ alu_out = (rs_u < static_cast<uint64_t>(se_imm16)) ? 1 : 0;
break;
case ANDI:
alu_out = rs & oe_imm16;
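Several simulator cases above swap an open-coded rotate for base::bits::RotateRight32. Besides readability, the open-coded form (x >> n) | (x << (32 - n)) shifts by 32 when n == 0, which is undefined behavior in C++. A portable sketch of the helper's assumed contract:

    #include <cstdint>
    #include <cstdio>

    // Assumed equivalent of base::bits::RotateRight32; masking the count
    // and special-casing zero keeps every shift amount well defined.
    uint32_t RotateRight32(uint32_t value, uint32_t shift) {
      shift &= 31;
      if (shift == 0) return value;
      return (value >> shift) | (value << (32 - shift));
    }

    int main() {
      printf("%08x\n", RotateRight32(0x80000001u, 1));  // c0000000
      printf("%08x\n", RotateRight32(0x80000001u, 0));  // 80000001
      return 0;
    }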
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 1848e4f7c3..bfaac73034 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -657,7 +657,7 @@ SymbolMirror.prototype.description = function() {
SymbolMirror.prototype.toText = function() {
- return %_CallFunction(this.value_, builtins.SymbolToString);
+ return %_CallFunction(this.value_, builtins.$symbolToString);
}
diff --git a/deps/v8/src/modules.cc b/deps/v8/src/modules.cc
index eb01cf08e4..267c398c47 100644
--- a/deps/v8/src/modules.cc
+++ b/deps/v8/src/modules.cc
@@ -11,28 +11,39 @@
namespace v8 {
namespace internal {
-// ---------------------------------------------------------------------------
-// Addition.
-void ModuleDescriptor::Add(const AstRawString* name, Zone* zone, bool* ok) {
- void* key = const_cast<AstRawString*>(name);
+void ModuleDescriptor::AddLocalExport(const AstRawString* export_name,
+ const AstRawString* local_name,
+ Zone* zone, bool* ok) {
+ void* key = const_cast<AstRawString*>(export_name);
- ZoneHashMap** map = &exports_;
ZoneAllocationPolicy allocator(zone);
- if (*map == nullptr) {
- *map = new (zone->New(sizeof(ZoneHashMap)))
- ZoneHashMap(ZoneHashMap::PointersMatch,
- ZoneHashMap::kDefaultHashMapCapacity, allocator);
+ if (exports_ == nullptr) {
+ exports_ = new (zone->New(sizeof(ZoneHashMap))) ZoneHashMap(
+ AstRawString::Compare, ZoneHashMap::kDefaultHashMapCapacity, allocator);
}
ZoneHashMap::Entry* p =
- (*map)->Lookup(key, name->hash(), !IsFrozen(), allocator);
+ exports_->Lookup(key, export_name->hash(), !IsFrozen(), allocator);
if (p == nullptr || p->value != nullptr) {
*ok = false;
}
- p->value = key;
+ p->value = const_cast<AstRawString*>(local_name);
+}
+
+
+const AstRawString* ModuleDescriptor::LookupLocalExport(
+ const AstRawString* export_name, Zone* zone) {
+ if (exports_ == nullptr) return nullptr;
+ ZoneAllocationPolicy allocator(zone);
+ ZoneHashMap::Entry* entry =
+ exports_->Lookup(const_cast<AstRawString*>(export_name),
+ export_name->hash(), false, allocator);
+ if (entry == nullptr) return nullptr;
+ DCHECK_NOT_NULL(entry->value);
+ return static_cast<const AstRawString*>(entry->value);
}
}
} // namespace v8::internal
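The descriptor now stores a mapping from export name to local name rather than a bare set of names, and LookupLocalExport reads that mapping back. A rough host model of the two operations, with standard containers standing in for ZoneHashMap and AstRawString:

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    class ModuleExports {
     public:
      bool AddLocalExport(const std::string& export_name,
                          const std::string& local_name) {
        // A duplicate export name is an error, mirroring the *ok = false path.
        return exports_.emplace(export_name, local_name).second;
      }
      const std::string* LookupLocalExport(const std::string& export_name) const {
        auto it = exports_.find(export_name);
        return it == exports_.end() ? nullptr : &it->second;
      }

     private:
      std::unordered_map<std::string, std::string> exports_;
    };

    int main() {
      ModuleExports exports;
      exports.AddLocalExport("default", "foo");  // export {foo as default}
      printf("duplicate rejected: %d\n", !exports.AddLocalExport("default", "bar"));
      const std::string* local = exports.LookupLocalExport("default");
      printf("local name: %s\n", local ? local->c_str() : "(none)");
      return 0;
    }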
diff --git a/deps/v8/src/modules.h b/deps/v8/src/modules.h
index ac04e47c4d..7dd7e26716 100644
--- a/deps/v8/src/modules.h
+++ b/deps/v8/src/modules.h
@@ -28,7 +28,8 @@ class ModuleDescriptor : public ZoneObject {
// Add a name to the list of exports. If it already exists, or this descriptor
// is frozen, that's an error.
- void Add(const AstRawString* name, Zone* zone, bool* ok);
+ void AddLocalExport(const AstRawString* export_name,
+ const AstRawString* local_name, Zone* zone, bool* ok);
// Do not allow any further refinements, directly or through unification.
void Freeze() { frozen_ = true; }
@@ -57,6 +58,9 @@ class ModuleDescriptor : public ZoneObject {
return index_;
}
+ const AstRawString* LookupLocalExport(const AstRawString* export_name,
+ Zone* zone);
+
// ---------------------------------------------------------------------------
// Iterators.
@@ -67,10 +71,14 @@ class ModuleDescriptor : public ZoneObject {
class Iterator {
public:
bool done() const { return entry_ == NULL; }
- const AstRawString* name() const {
+ const AstRawString* export_name() const {
DCHECK(!done());
return static_cast<const AstRawString*>(entry_->key);
}
+ const AstRawString* local_name() const {
+ DCHECK(!done());
+ return static_cast<const AstRawString*>(entry_->value);
+ }
void Advance() { entry_ = exports_->Next(entry_); }
private:
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
index 01ce8054fd..3ba917ecda 100644
--- a/deps/v8/src/object-observe.js
+++ b/deps/v8/src/object-observe.js
@@ -271,7 +271,7 @@ function ConvertAcceptListToTypeMap(arg) {
return arg;
if (!IS_SPEC_OBJECT(arg))
- throw MakeTypeError("observe_accept_invalid");
+ throw MakeTypeError("observe_invalid_accept");
var len = ToInteger(arg.length);
if (len < 0) len = 0;
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 78a07c737e..f06e7b624c 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -289,9 +289,11 @@ void JSObject::JSObjectVerify() {
if (r.IsSmi()) DCHECK(value->IsSmi());
if (r.IsHeapObject()) DCHECK(value->IsHeapObject());
HeapType* field_type = descriptors->GetFieldType(i);
+ bool type_is_none = field_type->Is(HeapType::None());
+ bool type_is_any = HeapType::Any()->Is(field_type);
if (r.IsNone()) {
- CHECK(field_type->Is(HeapType::None()));
- } else if (!HeapType::Any()->Is(field_type)) {
+ CHECK(type_is_none);
+ } else if (!type_is_any && !(type_is_none && r.IsHeapObject())) {
CHECK(!field_type->NowStable() || field_type->NowContains(value));
}
}
@@ -320,10 +322,8 @@ void Map::MapVerify() {
VerifyHeapPointer(prototype());
VerifyHeapPointer(instance_descriptors());
SLOW_DCHECK(instance_descriptors()->IsSortedNoDuplicates());
- if (HasTransitionArray()) {
- SLOW_DCHECK(transitions()->IsSortedNoDuplicates());
- SLOW_DCHECK(transitions()->IsConsistentWithBackPointers(this));
- }
+ SLOW_DCHECK(TransitionArray::IsSortedNoDuplicates(this));
+ SLOW_DCHECK(TransitionArray::IsConsistentWithBackPointers(this));
// TODO(ishell): turn it back to SLOW_DCHECK.
CHECK(!FLAG_unbox_double_fields ||
layout_descriptor()->IsConsistentWithMap(this));
@@ -344,7 +344,6 @@ void Map::VerifyOmittedMapChecks() {
if (!FLAG_omit_map_checks_for_leaf_maps) return;
if (!is_stable() ||
is_deprecated() ||
- HasTransitionArray() ||
is_dictionary_map()) {
CHECK_EQ(0, dependent_code()->number_of_entries(
DependentCode::kPrototypeCheckGroup));
@@ -426,7 +425,6 @@ void JSGeneratorObject::JSGeneratorObjectVerify() {
VerifyObjectField(kReceiverOffset);
VerifyObjectField(kOperandStackOffset);
VerifyObjectField(kContinuationOffset);
- VerifyObjectField(kStackHandlerIndexOffset);
}
@@ -646,7 +644,6 @@ void Cell::CellVerify() {
void PropertyCell::PropertyCellVerify() {
CHECK(IsPropertyCell());
VerifyObjectField(kValueOffset);
- VerifyObjectField(kTypeOffset);
}
@@ -1208,14 +1205,28 @@ bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
}
+// static
+bool TransitionArray::IsSortedNoDuplicates(Map* map) {
+ Object* raw_transitions = map->raw_transitions();
+ if (IsFullTransitionArray(raw_transitions)) {
+ return TransitionArray::cast(raw_transitions)->IsSortedNoDuplicates();
+ }
+ // Simple and non-existent transitions are always sorted.
+ return true;
+}
+
+
static bool CheckOneBackPointer(Map* current_map, Object* target) {
return !target->IsMap() || Map::cast(target)->GetBackPointer() == current_map;
}
-bool TransitionArray::IsConsistentWithBackPointers(Map* current_map) {
- for (int i = 0; i < number_of_transitions(); ++i) {
- if (!CheckOneBackPointer(current_map, GetTarget(i))) return false;
+// static
+bool TransitionArray::IsConsistentWithBackPointers(Map* map) {
+ Object* transitions = map->raw_transitions();
+ for (int i = 0; i < TransitionArray::NumberOfTransitions(transitions); ++i) {
+ Map* target = TransitionArray::GetTarget(transitions, i);
+ if (!CheckOneBackPointer(map, target)) return false;
}
return true;
}
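These two new static overloads exist because a map's transitions slot is now polymorphic: it can hold nothing, a single "simple" transition, or a full TransitionArray, and only the static TransitionArray helpers know how to decode it. A hedged sketch of the dispatch the verifiers above rely on, using only calls visible in this hunk (the exact encodings live in transitions.h, which is not part of this excerpt):

    // raw_transitions() decoding as used by the checks above (sketch):
    //   - no transitions        -> NumberOfTransitions() == 0
    //   - one simple transition -> NumberOfTransitions() == 1, but
    //                              IsFullTransitionArray() is false
    //   - full array            -> IsFullTransitionArray() is true, and
    //                              IsSortedNoDuplicates() applies
    static void WalkTransitionTargets(Map* map) {
      Object* raw = map->raw_transitions();
      int n = TransitionArray::NumberOfTransitions(raw);
      for (int i = 0; i < n; ++i) {
        Map* target = TransitionArray::GetTarget(raw, i);
        (void)target;  // e.g. feed into CheckOneBackPointer(map, target)
      }
    }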
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 926e1c7a73..67d64c4779 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -51,12 +51,6 @@ Smi* PropertyDetails::AsSmi() const {
}
-PropertyDetails PropertyDetails::AsDeleted() const {
- Smi* smi = Smi::FromInt(value_ | DeletedField::encode(1));
- return PropertyDetails(smi);
-}
-
-
int PropertyDetails::field_width_in_words() const {
DCHECK(location() == kField);
if (!FLAG_unbox_double_fields) return 1;
@@ -719,6 +713,9 @@ bool Object::IsDescriptorArray() const {
}
+bool Object::IsArrayList() const { return IsFixedArray(); }
+
+
bool Object::IsLayoutDescriptor() const {
return IsSmi() || IsFixedTypedArrayBase();
}
@@ -758,6 +755,14 @@ bool Object::IsDeoptimizationOutputData() const {
}
+bool Object::IsHandlerTable() const {
+ if (!IsFixedArray()) return false;
+ // There's actually no way to see the difference between a fixed array and
+ // a handler table array.
+ return true;
+}
+
+
bool Object::IsDependentCode() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
@@ -1877,41 +1882,6 @@ void JSObject::initialize_elements() {
}
-Handle<String> Map::ExpectedTransitionKey(Handle<Map> map) {
- DisallowHeapAllocation no_gc;
- if (!map->HasTransitionArray()) return Handle<String>::null();
- TransitionArray* transitions = map->transitions();
- if (!transitions->IsSimpleTransition()) return Handle<String>::null();
- int transition = TransitionArray::kSimpleTransitionIndex;
- PropertyDetails details = transitions->GetTargetDetails(transition);
- Name* name = transitions->GetKey(transition);
- if (details.type() != DATA) return Handle<String>::null();
- if (details.attributes() != NONE) return Handle<String>::null();
- if (!name->IsString()) return Handle<String>::null();
- return Handle<String>(String::cast(name));
-}
-
-
-Handle<Map> Map::ExpectedTransitionTarget(Handle<Map> map) {
- DCHECK(!ExpectedTransitionKey(map).is_null());
- return Handle<Map>(map->transitions()->GetTarget(
- TransitionArray::kSimpleTransitionIndex));
-}
-
-
-Handle<Map> Map::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
- DisallowHeapAllocation no_allocation;
- if (!map->HasTransitionArray()) return Handle<Map>::null();
- TransitionArray* transitions = map->transitions();
- int transition = transitions->Search(kData, *key, NONE);
- if (transition == TransitionArray::kNotFound) return Handle<Map>::null();
- PropertyDetails details = transitions->GetTargetDetails(transition);
- if (details.type() != DATA) return Handle<Map>::null();
- DCHECK_EQ(NONE, details.attributes());
- return Handle<Map>(transitions->GetTarget(transition));
-}
-
-
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
@@ -1938,16 +1908,7 @@ void Cell::set_value(Object* val, WriteBarrierMode ignored) {
}
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
-
-Object* PropertyCell::type_raw() const {
- return READ_FIELD(this, kTypeOffset);
-}
-
-
-void PropertyCell::set_type_raw(Object* val, WriteBarrierMode ignored) {
- WRITE_FIELD(this, kTypeOffset, val);
-}
-
+ACCESSORS(PropertyCell, value, Object, kValueOffset)
Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
@@ -2139,6 +2100,31 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
}
+void JSObject::WriteToField(int descriptor, Object* value) {
+ DisallowHeapAllocation no_gc;
+
+ DescriptorArray* desc = map()->instance_descriptors();
+ PropertyDetails details = desc->GetDetails(descriptor);
+
+ DCHECK(details.type() == DATA);
+
+ FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
+ if (details.representation().IsDouble()) {
+ // Nothing more to be done.
+ if (value->IsUninitialized()) return;
+ if (IsUnboxedDoubleField(index)) {
+ RawFastDoublePropertyAtPut(index, value->Number());
+ } else {
+ HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
+ DCHECK(box->IsMutableHeapNumber());
+ box->set_value(value->Number());
+ }
+ } else {
+ RawFastPropertyAtPut(index, value);
+ }
+}
+
+
int JSObject::GetInObjectPropertyOffset(int index) {
return map()->GetInObjectPropertyOffset(index);
}
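WriteToField() above runs under DisallowHeapAllocation, which is why its double path mutates storage in place instead of allocating: an unboxed double field is overwritten raw, and a boxed one reuses the MutableHeapNumber already occupying the slot, so the pointer stored in the object never changes. A hedged usage sketch (the call site and descriptor index are assumptions; only WriteToField() itself comes from this hunk):

    // Sketch: storing a value without triggering GC.
    void StoreNoAlloc(JSObject* obj, Object* value) {
      DisallowHeapAllocation no_gc;
      obj->WriteToField(0, value);  // descriptor index 0 is hypothetical
    }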
@@ -2371,6 +2357,7 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
Object* WeakFixedArray::Get(int index) const {
Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
if (raw->IsSmi()) return raw;
+ DCHECK(raw->IsWeakCell());
return WeakCell::cast(raw)->value();
}
@@ -2401,6 +2388,39 @@ void WeakFixedArray::set_last_used_index(int index) {
}
+int ArrayList::Length() {
+ if (FixedArray::cast(this)->length() == 0) return 0;
+ return Smi::cast(FixedArray::cast(this)->get(kLengthIndex))->value();
+}
+
+
+void ArrayList::SetLength(int length) {
+ return FixedArray::cast(this)->set(kLengthIndex, Smi::FromInt(length));
+}
+
+
+Object* ArrayList::Get(int index) {
+ return FixedArray::cast(this)->get(kFirstIndex + index);
+}
+
+
+Object** ArrayList::Slot(int index) {
+ return data_start() + kFirstIndex + index;
+}
+
+
+void ArrayList::Set(int index, Object* obj) {
+ FixedArray::cast(this)->set(kFirstIndex + index, obj);
+}
+
+
+void ArrayList::Clear(int index, Object* undefined) {
+ DCHECK(undefined->IsUndefined());
+ FixedArray::cast(this)
+ ->set(kFirstIndex + index, undefined, SKIP_WRITE_BARRIER);
+}
+
+
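Taken together, these accessors pin down ArrayList's layout: it is a FixedArray whose slot kLengthIndex caches the logical length as a Smi, with the payload starting at kFirstIndex, so the backing store can be over-allocated. A hypothetical append helper built only from the accessors above (growth and capacity handling are assumed to live elsewhere, e.g. in a non-inline Add()):

    // Sketch: in-place append, valid only when spare capacity exists.
    void AppendAssumingCapacity(ArrayList* list, Object* obj) {
      int n = list->Length();
      list->Set(n, obj);       // writes FixedArray slot kFirstIndex + n
      list->SetLength(n + 1);  // bumps the cached Smi length
    }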
void ConstantPoolArray::NumberOfEntries::increment(Type type) {
DCHECK(type < NUMBER_OF_TYPES);
element_counts_[type]++;
@@ -3037,7 +3057,7 @@ FixedArrayBase* Map::GetInitialElements() {
return empty_array;
} else if (has_fixed_typed_array_elements()) {
FixedTypedArrayBase* empty_array =
- GetHeap()->EmptyFixedTypedArrayForMap(this);
+ GetHeap()->EmptyFixedTypedArrayForMap(this);
DCHECK(!GetHeap()->InNewSpace(empty_array));
return empty_array;
} else {
@@ -3136,7 +3156,12 @@ int DescriptorArray::GetFieldIndex(int descriptor_number) {
HeapType* DescriptorArray::GetFieldType(int descriptor_number) {
DCHECK(GetDetails(descriptor_number).location() == kField);
- return HeapType::cast(GetValue(descriptor_number));
+ Object* value = GetValue(descriptor_number);
+ if (value->IsWeakCell()) {
+ if (WeakCell::cast(value)->cleared()) return HeapType::None();
+ value = WeakCell::cast(value)->value();
+ }
+ return HeapType::cast(value);
}
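This is the read side of a change threaded through the whole patch: field types naming a class (map) are now held through a WeakCell (see WrapType() in objects.cc later in this excerpt), so a descriptor no longer keeps its field-type map alive. A cleared cell conservatively reads back as HeapType::None(), which is also why JSObjectVerify() in objects-debug.cc above now tolerates a None type on a heap-object representation. The invariant, restated as a standalone check:

    // Sketch: a field-type slot holds either the HeapType itself or a
    // WeakCell wrapping it; cleared means the referenced map died.
    bool FieldTypeCellIsLive(DescriptorArray* desc, int i) {
      Object* v = desc->GetValue(i);
      return !v->IsWeakCell() || !WeakCell::cast(v)->cleared();
    }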
@@ -3294,6 +3319,7 @@ void SeededNumberDictionary::set_requires_slow_elements() {
CAST_ACCESSOR(AccessorInfo)
+CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(Code)
@@ -3324,6 +3350,7 @@ CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(GlobalObject)
+CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
@@ -4198,7 +4225,7 @@ void* FixedTypedArrayBase::DataPtr() {
}
-int FixedTypedArrayBase::DataSize(InstanceType type) {
+int FixedTypedArrayBase::ElementSize(InstanceType type) {
int element_size;
switch (type) {
#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
@@ -4212,7 +4239,12 @@ int FixedTypedArrayBase::DataSize(InstanceType type) {
UNREACHABLE();
return 0;
}
- return length() * element_size;
+ return element_size;
+}
+
+
+int FixedTypedArrayBase::DataSize(InstanceType type) {
+ return length() * ElementSize(type);
}
@@ -4231,6 +4263,11 @@ int FixedTypedArrayBase::TypedArraySize(InstanceType type) {
}
+int FixedTypedArrayBase::TypedArraySize(InstanceType type, int length) {
+ return OBJECT_POINTER_ALIGN(kDataOffset + length * ElementSize(type));
+}
+
+
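Splitting ElementSize() out of DataSize() is what makes the new two-argument TypedArraySize(type, length) possible: callers can size a typed array that does not exist yet, rather than needing a live instance for length(). The relationship between the three, restated (whether the new overload is declared static is not visible in this hunk):

    // DataSize(type)               == length() * ElementSize(type)
    // TypedArraySize(type)         == OBJECT_POINTER_ALIGN(
    //                                     kDataOffset + DataSize(type))
    // TypedArraySize(type, length) == OBJECT_POINTER_ALIGN(
    //                                     kDataOffset +
    //                                     length * ElementSize(type))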
uint8_t Uint8ArrayTraits::defaultValue() { return 0; }
@@ -5250,22 +5287,6 @@ void Map::set_prototype(Object* value, WriteBarrierMode mode) {
}
-// If the descriptor is using the empty transition array, install a new empty
-// transition array that will have place for an element transition.
-static void EnsureHasTransitionArray(Handle<Map> map) {
- Handle<TransitionArray> transitions;
- if (!map->HasTransitionArray()) {
- transitions = TransitionArray::Allocate(map->GetIsolate(), 0);
- transitions->set_back_pointer_storage(map->GetBackPointer());
- } else if (!map->transitions()->IsFullTransitionArray()) {
- transitions = TransitionArray::ExtendToFullTransitionArray(map);
- } else {
- return;
- }
- map->set_transitions(*transitions);
-}
-
-
LayoutDescriptor* Map::layout_descriptor_gc_safe() {
Object* layout_desc = READ_FIELD(this, kLayoutDecriptorOffset);
return LayoutDescriptor::cast_gc_safe(layout_desc);
@@ -5360,158 +5381,58 @@ void Map::AppendDescriptor(Descriptor* desc) {
Object* Map::GetBackPointer() {
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- if (object->IsTransitionArray()) {
- return TransitionArray::cast(object)->back_pointer_storage();
- } else {
- DCHECK(object->IsMap() || object->IsUndefined());
+ Object* object = constructor_or_backpointer();
+ if (object->IsMap()) {
return object;
}
+ return GetIsolate()->heap()->undefined_value();
}
-bool Map::HasElementsTransition() {
- return HasTransitionArray() && transitions()->HasElementsTransition();
-}
-
-
-bool Map::HasTransitionArray() const {
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- return object->IsTransitionArray();
-}
-
-
-Map* Map::elements_transition_map() {
- int index =
- transitions()->SearchSpecial(GetHeap()->elements_transition_symbol());
- return transitions()->GetTarget(index);
+Map* Map::ElementsTransitionMap() {
+ return TransitionArray::SearchSpecial(
+ this, GetHeap()->elements_transition_symbol());
}
-bool Map::CanHaveMoreTransitions() {
- if (!HasTransitionArray()) return true;
- return transitions()->number_of_transitions() <
- TransitionArray::kMaxNumberOfTransitions;
-}
+ACCESSORS(Map, raw_transitions, Object, kTransitionsOffset)
-Map* Map::GetTransition(int transition_index) {
- return transitions()->GetTarget(transition_index);
-}
-
-
-int Map::SearchSpecialTransition(Symbol* name) {
- if (HasTransitionArray()) {
- return transitions()->SearchSpecial(name);
- }
- return TransitionArray::kNotFound;
-}
-
-
-int Map::SearchTransition(PropertyKind kind, Name* name,
- PropertyAttributes attributes) {
- if (HasTransitionArray()) {
- return transitions()->Search(kind, name, attributes);
- }
- return TransitionArray::kNotFound;
-}
-
-
-FixedArray* Map::GetPrototypeTransitions() {
- if (!HasTransitionArray()) return GetHeap()->empty_fixed_array();
- if (!transitions()->HasPrototypeTransitions()) {
- return GetHeap()->empty_fixed_array();
- }
- return transitions()->GetPrototypeTransitions();
-}
-
-
-void Map::SetPrototypeTransitions(
- Handle<Map> map, Handle<FixedArray> proto_transitions) {
- EnsureHasTransitionArray(map);
- int old_number_of_transitions = map->NumberOfProtoTransitions();
- if (Heap::ShouldZapGarbage() && map->HasPrototypeTransitions()) {
- DCHECK(map->GetPrototypeTransitions() != *proto_transitions);
- map->ZapPrototypeTransitions();
- }
- map->transitions()->SetPrototypeTransitions(*proto_transitions);
- map->SetNumberOfProtoTransitions(old_number_of_transitions);
-}
-
-
-bool Map::HasPrototypeTransitions() {
- return HasTransitionArray() && transitions()->HasPrototypeTransitions();
+void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
+ DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE);
+ DCHECK((value->IsUndefined() && GetBackPointer()->IsMap()) ||
+ (value->IsMap() && GetBackPointer()->IsUndefined()));
+ DCHECK(!value->IsMap() ||
+ Map::cast(value)->GetConstructor() == constructor_or_backpointer());
+ set_constructor_or_backpointer(value, mode);
}
-TransitionArray* Map::transitions() const {
- DCHECK(HasTransitionArray());
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- return TransitionArray::cast(object);
-}
+ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
+ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
+ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
+ACCESSORS(Map, constructor_or_backpointer, Object,
+ kConstructorOrBackPointerOffset)
-void Map::set_transitions(TransitionArray* transition_array,
- WriteBarrierMode mode) {
- // Transition arrays are not shared. When one is replaced, it should not
- // keep referenced objects alive, so we zap it.
- // When there is another reference to the array somewhere (e.g. a handle),
- // not zapping turns from a waste of memory into a source of crashes.
- if (HasTransitionArray()) {
-#ifdef DEBUG
- for (int i = 0; i < transitions()->number_of_transitions(); i++) {
- Map* target = transitions()->GetTarget(i);
- if (target->instance_descriptors() == instance_descriptors()) {
- Name* key = transitions()->GetKey(i);
- int new_target_index;
- if (TransitionArray::IsSpecialTransition(key)) {
- new_target_index = transition_array->SearchSpecial(Symbol::cast(key));
- } else {
- PropertyDetails details =
- TransitionArray::GetTargetDetails(key, target);
- new_target_index = transition_array->Search(details.kind(), key,
- details.attributes());
- }
- DCHECK_NE(TransitionArray::kNotFound, new_target_index);
- DCHECK_EQ(target, transition_array->GetTarget(new_target_index));
- }
- }
-#endif
- DCHECK(transitions() != transition_array);
- ZapTransitions();
+Object* Map::GetConstructor() const {
+ Object* maybe_constructor = constructor_or_backpointer();
+ // Follow any back pointers.
+ while (maybe_constructor->IsMap()) {
+ maybe_constructor =
+ Map::cast(maybe_constructor)->constructor_or_backpointer();
}
-
- WRITE_FIELD(this, kTransitionsOrBackPointerOffset, transition_array);
- CONDITIONAL_WRITE_BARRIER(
- GetHeap(), this, kTransitionsOrBackPointerOffset, transition_array, mode);
-}
-
-
-void Map::init_back_pointer(Object* undefined) {
- DCHECK(undefined->IsUndefined());
- WRITE_FIELD(this, kTransitionsOrBackPointerOffset, undefined);
+ return maybe_constructor;
}
-void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
- DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE);
- DCHECK((value->IsUndefined() && GetBackPointer()->IsMap()) ||
- (value->IsMap() && GetBackPointer()->IsUndefined()));
- Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
- if (object->IsTransitionArray()) {
- TransitionArray::cast(object)->set_back_pointer_storage(value);
- } else {
- WRITE_FIELD(this, kTransitionsOrBackPointerOffset, value);
- CONDITIONAL_WRITE_BARRIER(
- GetHeap(), this, kTransitionsOrBackPointerOffset, value, mode);
- }
+void Map::SetConstructor(Object* constructor, WriteBarrierMode mode) {
+ // Never overwrite a back pointer with a constructor.
+ DCHECK(!constructor_or_backpointer()->IsMap());
+ set_constructor_or_backpointer(constructor, mode);
}
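This hunk merges two Map fields into one: constructor_or_backpointer() holds the back pointer while a map is still a child in a transition tree, and the constructor once the map is a root (the weak_cell_cache accessor also moves onto Map here). The DCHECKs in SetBackPointer() and SetConstructor() encode the invariant; a sketch of the resulting states:

    // Sketch of the overloaded slot, derived from the accessors above:
    //   root map:  constructor_or_backpointer() -> constructor (non-Map)
    //   child map: constructor_or_backpointer() -> parent Map
    // GetConstructor() therefore chases back pointers to the root, and
    // GetBackPointer() reports undefined unless the slot holds a Map.
    bool IsTransitionChild(Map* map) {  // hypothetical helper name
      return map->constructor_or_backpointer()->IsMap();
    }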
-ACCESSORS(Map, code_cache, Object, kCodeCacheOffset)
-ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
-ACCESSORS(Map, constructor, Object, kConstructorOffset)
-
ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset)
ACCESSORS(JSFunction, literals_or_bindings, FixedArray, kLiteralsOffset)
ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
@@ -5551,6 +5472,7 @@ SMI_ACCESSORS(InterceptorInfo, flags, kFlagsOffset)
BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
kCanInterceptSymbolsBit)
BOOL_ACCESSORS(InterceptorInfo, flags, all_can_read, kAllCanReadBit)
+BOOL_ACCESSORS(InterceptorInfo, flags, non_masking, kNonMasking)
ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
@@ -5671,6 +5593,8 @@ BOOL_ACCESSORS(FunctionTemplateInfo, flag, remove_prototype,
BOOL_ACCESSORS(FunctionTemplateInfo, flag, do_not_cache,
kDoNotCacheBit)
BOOL_ACCESSORS(FunctionTemplateInfo, flag, instantiated, kInstantiatedBit)
+BOOL_ACCESSORS(FunctionTemplateInfo, flag, accept_any_receiver,
+ kAcceptAnyReceiver)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
kIsExpressionBit)
BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
@@ -5854,7 +5778,6 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
-ACCESSORS(CodeCache, weak_cell_cache, Object, kWeakCellCacheOffset)
ACCESSORS(PolymorphicCodeCache, cache, Object, kCacheOffset)
@@ -5932,7 +5855,10 @@ void SharedFunctionInfo::set_scope_info(ScopeInfo* value,
bool SharedFunctionInfo::is_compiled() {
- return code() != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy);
+ Builtins* builtins = GetIsolate()->builtins();
+ DCHECK(code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent));
+ DCHECK(code() != builtins->builtin(Builtins::kCompileOptimized));
+ return code() != builtins->builtin(Builtins::kCompileLazy);
}
@@ -6199,7 +6125,12 @@ Object* JSFunction::prototype() {
DCHECK(has_prototype());
// If the function's prototype property has been set to a non-JSObject
// value, that value is stored in the constructor field of the map.
- if (map()->has_non_instance_prototype()) return map()->constructor();
+ if (map()->has_non_instance_prototype()) {
+ Object* prototype = map()->GetConstructor();
+ // The map must have a prototype in that field, not a back pointer.
+ DCHECK(!prototype->IsMap());
+ return prototype;
+ }
return instance_prototype();
}
@@ -6210,7 +6141,10 @@ bool JSFunction::should_have_prototype() {
bool JSFunction::is_compiled() {
- return code() != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy);
+ Builtins* builtins = GetIsolate()->builtins();
+ return code() != builtins->builtin(Builtins::kCompileLazy) &&
+ code() != builtins->builtin(Builtins::kCompileOptimized) &&
+ code() != builtins->builtin(Builtins::kCompileOptimizedConcurrent);
}
@@ -6267,20 +6201,6 @@ void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
}
-Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
- DCHECK(id < kJSBuiltinsCount); // id is unsigned.
- return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
-}
-
-
-void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
- Code* value) {
- DCHECK(id < kJSBuiltinsCount); // id is unsigned.
- WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
- DCHECK(!GetHeap()->InNewSpace(value));
-}
-
-
ACCESSORS(JSProxy, handler, Object, kHandlerOffset)
ACCESSORS(JSProxy, hash, Object, kHashOffset)
ACCESSORS(JSFunctionProxy, call_trap, Object, kCallTrapOffset)
@@ -6336,7 +6256,6 @@ ACCESSORS(JSGeneratorObject, context, Context, kContextOffset)
ACCESSORS(JSGeneratorObject, receiver, Object, kReceiverOffset)
SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
-SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset)
bool JSGeneratorObject::is_suspended() {
DCHECK_LT(kGeneratorExecuting, kGeneratorClosed);
@@ -6942,8 +6861,7 @@ Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
return JSProxy::HasPropertyWithHandler(proxy, name);
}
Maybe<PropertyAttributes> result = GetPropertyAttributes(object, name);
- if (!result.has_value) return Maybe<bool>();
- return maybe(result.value != ABSENT);
+ return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
}
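This file also migrates from the old struct-style Maybe (public has_value/value fields, the maybe() constructor) to the Just()/Nothing()/IsJust()/FromJust() API; the same mechanical rewrite recurs in every Has*() helper below. The pattern, isolated as a sketch:

    // Convert a Maybe<PropertyAttributes> into Maybe<bool>, propagating
    // "nothing" (e.g. a pending exception) instead of a default value.
    Maybe<bool> PresentAndNotAbsent(Maybe<PropertyAttributes> result) {
      return result.IsJust() ? Just(result.FromJust() != ABSENT)
                             : Nothing<bool>();
    }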
@@ -6954,8 +6872,7 @@ Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
return JSProxy::HasPropertyWithHandler(proxy, name);
}
Maybe<PropertyAttributes> result = GetOwnPropertyAttributes(object, name);
- if (!result.has_value) return Maybe<bool>();
- return maybe(result.value != ABSENT);
+ return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
}
@@ -7014,8 +6931,7 @@ Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
}
Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver(
Handle<JSObject>::cast(object), object, index, true);
- if (!result.has_value) return Maybe<bool>();
- return maybe(result.value != ABSENT);
+ return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
}
@@ -7027,8 +6943,7 @@ Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object,
}
Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver(
Handle<JSObject>::cast(object), object, index, false);
- if (!result.has_value) return Maybe<bool>();
- return maybe(result.value != ABSENT);
+ return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
}
@@ -7102,9 +7017,7 @@ void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
Handle<Object> key,
Handle<Object> value,
PropertyDetails details) {
- DCHECK(!key->IsName() ||
- details.IsDeleted() ||
- details.dictionary_index() > 0);
+ DCHECK(!key->IsName() || details.dictionary_index() > 0);
int index = DerivedHashTable::EntryToIndex(entry);
DisallowHeapAllocation no_gc;
WriteBarrierMode mode = FixedArray::GetWriteBarrierMode(no_gc);
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 87f039628b..1b31c1bfff 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -432,11 +432,12 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
if (FLAG_unbox_double_fields) {
os << "\n - layout descriptor: " << Brief(layout_descriptor());
}
- if (HasTransitionArray()) {
- os << "\n - transitions: " << Brief(transitions());
+ if (TransitionArray::NumberOfTransitions(raw_transitions()) > 0) {
+ os << "\n - transitions: ";
+ TransitionArray::PrintTransitions(os, raw_transitions());
}
os << "\n - prototype: " << Brief(prototype());
- os << "\n - constructor: " << Brief(constructor());
+ os << "\n - constructor: " << Brief(GetConstructor());
os << "\n - code cache: " << Brief(code_cache());
os << "\n - dependent code: " << Brief(dependent_code());
os << "\n";
@@ -1138,19 +1139,20 @@ void DescriptorArray::PrintDescriptors(std::ostream& os) { // NOLINT
void TransitionArray::Print() {
OFStream os(stdout);
- this->PrintTransitions(os);
+ TransitionArray::PrintTransitions(os, this);
os << std::flush;
}
-void TransitionArray::PrintTransitions(std::ostream& os,
+void TransitionArray::PrintTransitions(std::ostream& os, Object* transitions,
bool print_header) { // NOLINT
+ int num_transitions = NumberOfTransitions(transitions);
if (print_header) {
- os << "Transition array " << number_of_transitions() << "\n";
+ os << "Transition array " << num_transitions << "\n";
}
- for (int i = 0; i < number_of_transitions(); i++) {
- Name* key = GetKey(i);
- Map* target = GetTarget(i);
+ for (int i = 0; i < num_transitions; i++) {
+ Name* key = GetKey(transitions, i);
+ Map* target = GetTarget(transitions, i);
os << " ";
#ifdef OBJECT_PRINT
key->NamePrint(os);
@@ -1158,16 +1160,17 @@ void TransitionArray::PrintTransitions(std::ostream& os,
key->ShortPrint(os);
#endif
os << ": ";
- if (key == GetHeap()->nonextensible_symbol()) {
+ Heap* heap = key->GetHeap();
+ if (key == heap->nonextensible_symbol()) {
os << " (transition to non-extensible)";
- } else if (key == GetHeap()->sealed_symbol()) {
+ } else if (key == heap->sealed_symbol()) {
os << " (transition to sealed)";
- } else if (key == GetHeap()->frozen_symbol()) {
+ } else if (key == heap->frozen_symbol()) {
os << " (transition to frozen)";
- } else if (key == GetHeap()->elements_transition_symbol()) {
+ } else if (key == heap->elements_transition_symbol()) {
os << " (transition to " << ElementsKindToString(target->elements_kind())
<< ")";
- } else if (key == GetHeap()->observed_symbol()) {
+ } else if (key == heap->observed_symbol()) {
os << " (transition to Object.observe)";
} else {
PropertyDetails details = GetTargetDetails(key, target);
@@ -1177,7 +1180,9 @@ void TransitionArray::PrintTransitions(std::ostream& os,
}
os << (details.kind() == kData ? "data" : "accessor");
if (details.location() == kDescriptor) {
- os << " " << Brief(GetTargetValue(i));
+ Object* value =
+ target->instance_descriptors()->GetValue(target->LastAdded());
+ os << " " << Brief(value);
}
os << "), attrs: " << details.attributes();
}
@@ -1187,8 +1192,7 @@ void TransitionArray::PrintTransitions(std::ostream& os,
void JSObject::PrintTransitions(std::ostream& os) { // NOLINT
- if (!map()->HasTransitionArray()) return;
- map()->transitions()->PrintTransitions(os, false);
+ TransitionArray::PrintTransitions(os, map()->raw_transitions());
}
#endif // defined(DEBUG) || defined(OBJECT_PRINT)
} } // namespace v8::internal
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 0eda4912e6..77a82e6d94 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <iomanip>
#include <sstream>
#include "src/v8.h"
@@ -14,6 +15,7 @@
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/compiler.h"
#include "src/cpu-profiler.h"
#include "src/date.h"
#include "src/debug.h"
@@ -125,12 +127,14 @@ MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
break;
}
case LookupIterator::ACCESS_CHECK:
- if (it->HasAccess(v8::ACCESS_GET)) break;
+ if (it->HasAccess()) break;
return JSObject::GetPropertyWithFailedAccessCheck(it);
case LookupIterator::ACCESSOR:
return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
it->GetHolder<JSObject>(),
it->GetAccessors());
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return it->factory()->undefined_value();
case LookupIterator::DATA:
return it->GetDataValue();
}
@@ -155,7 +159,7 @@ Handle<Object> JSObject::GetDataProperty(LookupIterator* it) {
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- if (it->HasAccess(v8::ACCESS_GET)) continue;
+ if (it->HasAccess()) continue;
// Fall through.
case LookupIterator::JSPROXY:
it->NotFound();
@@ -166,6 +170,8 @@ Handle<Object> JSObject::GetDataProperty(LookupIterator* it) {
// relevant.
it->NotFound();
return it->isolate()->factory()->undefined_value();
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return it->isolate()->factory()->undefined_value();
case LookupIterator::DATA:
return it->GetDataValue();
}
@@ -219,7 +225,7 @@ bool FunctionTemplateInfo::IsTemplateFor(Map* map) {
// There is a constraint on the object; check.
if (!map->IsJSObjectMap()) return false;
// Fetch the constructor function of the object.
- Object* cons_obj = map->constructor();
+ Object* cons_obj = map->GetConstructor();
if (!cons_obj->IsJSFunction()) return false;
JSFunction* fun = JSFunction::cast(cons_obj);
// Iterate through the chain of inheriting function templates to
@@ -468,7 +474,7 @@ MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
if (it->isolate()->has_scheduled_exception()) break;
if (!result.is_null()) return result;
}
- it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_GET);
+ it->isolate()->ReportFailedAccessCheck(checked);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
return it->factory()->undefined_value();
}
@@ -479,18 +485,18 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
Handle<JSObject> checked = it->GetHolder<JSObject>();
while (FindAllCanReadHolder(it)) {
if (it->state() == LookupIterator::ACCESSOR) {
- return maybe(it->property_details().attributes());
+ return Just(it->property_details().attributes());
}
DCHECK_EQ(LookupIterator::INTERCEPTOR, it->state());
auto result = GetPropertyAttributesWithInterceptor(
it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
if (it->isolate()->has_scheduled_exception()) break;
- if (result.has_value && result.value != ABSENT) return result;
+ if (result.IsJust() && result.FromJust() != ABSENT) return result;
}
- it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_HAS);
+ it->isolate()->ReportFailedAccessCheck(checked);
RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(),
- Maybe<PropertyAttributes>());
- return maybe(ABSENT);
+ Nothing<PropertyAttributes>());
+ return Just(ABSENT);
}
@@ -516,7 +522,7 @@ MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
it->GetAccessors(), language_mode);
}
- it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_SET);
+ it->isolate()->ReportFailedAccessCheck(checked);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
return value;
}
@@ -536,40 +542,30 @@ void JSObject::SetNormalizedProperty(Handle<JSObject> object,
int entry = property_dictionary->FindEntry(name);
if (entry == NameDictionary::kNotFound) {
- Handle<Object> store_value = value;
if (object->IsGlobalObject()) {
- store_value = object->GetIsolate()->factory()->NewPropertyCell(value);
- }
-
- property_dictionary = NameDictionary::Add(
- property_dictionary, name, store_value, details);
+ auto cell = object->GetIsolate()->factory()->NewPropertyCell();
+ cell->set_value(*value);
+ auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
+ : PropertyCellType::kConstant;
+ details = details.set_cell_type(cell_type);
+ value = cell;
+ }
+ property_dictionary =
+ NameDictionary::Add(property_dictionary, name, value, details);
object->set_properties(*property_dictionary);
return;
}
- PropertyDetails original_details = property_dictionary->DetailsAt(entry);
- int enumeration_index;
- // Preserve the enumeration index unless the property was deleted.
- if (original_details.IsDeleted()) {
- enumeration_index = property_dictionary->NextEnumerationIndex();
- property_dictionary->SetNextEnumerationIndex(enumeration_index + 1);
- } else {
- enumeration_index = original_details.dictionary_index();
- DCHECK(enumeration_index > 0);
- }
-
- details = PropertyDetails(
- details.attributes(), details.type(), enumeration_index);
-
if (object->IsGlobalObject()) {
- Handle<PropertyCell> cell(
- PropertyCell::cast(property_dictionary->ValueAt(entry)));
- PropertyCell::SetValueInferType(cell, value);
- // Please note we have to update the property details.
- property_dictionary->DetailsAtPut(entry, details);
- } else {
- property_dictionary->SetEntry(entry, name, value, details);
+ PropertyCell::UpdateCell(property_dictionary, entry, value, details);
+ return;
}
+
+ PropertyDetails original_details = property_dictionary->DetailsAt(entry);
+ int enumeration_index = original_details.dictionary_index();
+ DCHECK(enumeration_index > 0);
+ details = details.set_index(enumeration_index);
+ property_dictionary->SetEntry(entry, name, value, details);
}
@@ -604,7 +600,7 @@ MaybeHandle<Object> JSObject::GetElementWithFailedAccessCheck(
if (!result.is_null()) return result;
where_to_start = PrototypeIterator::START_AT_PROTOTYPE;
}
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_GET);
+ isolate->ReportFailedAccessCheck(object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
@@ -621,14 +617,14 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributesWithFailedAccessCheck(
FindIndexedAllCanReadHolder(isolate, holder, where_to_start);
if (!all_can_read_holder.ToHandle(&holder)) break;
auto result =
- JSObject::GetElementAttributeFromInterceptor(object, receiver, index);
+ JSObject::GetElementAttributeFromInterceptor(holder, receiver, index);
if (isolate->has_scheduled_exception()) break;
- if (result.has_value && result.value != ABSENT) return result;
+ if (result.IsJust() && result.FromJust() != ABSENT) return result;
where_to_start = PrototypeIterator::START_AT_PROTOTYPE;
}
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>());
- return maybe(ABSENT);
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+ return Just(ABSENT);
}
@@ -663,7 +659,7 @@ MaybeHandle<Object> Object::GetElementWithReceiver(Isolate* isolate,
// Check access rights if needed.
if (js_object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_GET)) {
+ if (!isolate->MayAccess(js_object)) {
return JSObject::GetElementWithFailedAccessCheck(isolate, js_object,
receiver, index);
}
@@ -710,8 +706,8 @@ MaybeHandle<Object> Object::SetElementWithReceiver(
// Check access rights if needed.
if (js_object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(js_object, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(js_object, v8::ACCESS_SET);
+ if (!isolate->MayAccess(js_object)) {
+ isolate->ReportFailedAccessCheck(js_object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
@@ -721,12 +717,12 @@ MaybeHandle<Object> Object::SetElementWithReceiver(
Maybe<PropertyAttributes> from_interceptor =
JSObject::GetElementAttributeFromInterceptor(js_object, receiver,
index);
- if (!from_interceptor.has_value) return MaybeHandle<Object>();
- if ((from_interceptor.value & READ_ONLY) != 0) {
+ if (!from_interceptor.IsJust()) return MaybeHandle<Object>();
+ if ((from_interceptor.FromJust() & READ_ONLY) != 0) {
return WriteToReadOnlyElement(isolate, receiver, index, value,
language_mode);
}
- done = from_interceptor.value != ABSENT;
+ done = from_interceptor.FromJust() != ABSENT;
}
if (!done &&
@@ -798,10 +794,16 @@ Map* Object::GetRootMap(Isolate* isolate) {
Object* Object::GetHash() {
// The object is either a number, a name, an odd-ball,
// a real JS object, or a Harmony proxy.
- if (IsNumber()) {
- uint32_t hash = std::isnan(Number())
- ? Smi::kMaxValue
- : ComputeLongHash(double_to_uint64(Number()));
+ if (IsSmi()) {
+ int num = Smi::cast(this)->value();
+ uint32_t hash = ComputeLongHash(double_to_uint64(static_cast<double>(num)));
+ return Smi::FromInt(hash & Smi::kMaxValue);
+ }
+ if (IsHeapNumber()) {
+ double num = HeapNumber::cast(this)->value();
+ if (std::isnan(num)) return Smi::FromInt(Smi::kMaxValue);
+ if (i::IsMinusZero(num)) num = 0;
+ uint32_t hash = ComputeLongHash(double_to_uint64(num));
return Smi::FromInt(hash & Smi::kMaxValue);
}
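Splitting the number path makes a crucial property explicit: a Smi and a HeapNumber holding the same numeric value must hash identically, so the Smi branch widens to double before ComputeLongHash, while the HeapNumber branch folds -0 to 0 (a value Smis cannot represent) and maps NaN to a fixed sentinel. The shared core both branches reduce to, as a sketch:

    uint32_t NumberHash(double num) {
      if (std::isnan(num)) return Smi::kMaxValue;  // NaN sentinel
      if (i::IsMinusZero(num)) num = 0;            // canonicalize -0
      return ComputeLongHash(double_to_uint64(num)) & Smi::kMaxValue;
    }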
if (IsName()) {
@@ -1202,7 +1204,7 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
default: {
Map* map_of_this = map();
Heap* heap = GetHeap();
- Object* constructor = map_of_this->constructor();
+ Object* constructor = map_of_this->GetConstructor();
bool printed = false;
if (constructor->IsHeapObject() &&
!heap->Contains(HeapObject::cast(constructor))) {
@@ -1665,8 +1667,9 @@ String* JSReceiver::class_name() {
if (IsJSFunction() || IsJSFunctionProxy()) {
return GetHeap()->Function_string();
}
- if (map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(map()->constructor());
+ Object* maybe_constructor = map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
return String::cast(constructor->shared()->instance_class_name());
}
// If the constructor is not present, return "Object".
@@ -1675,8 +1678,9 @@ String* JSReceiver::class_name() {
String* Map::constructor_name() {
- if (constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(this->constructor());
+ Object* maybe_constructor = GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
String* name = String::cast(constructor->shared()->name());
if (name->length() > 0) return name;
String* inferred_name = constructor->shared()->inferred_name();
@@ -1695,6 +1699,12 @@ String* JSReceiver::constructor_name() {
}
+static Handle<Object> WrapType(Handle<HeapType> type) {
+ if (type->IsClass()) return Map::WeakCellForMap(type->AsClass()->Map());
+ return type;
+}
+
+
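WrapType() is the write side of the weak field types: a class type is replaced by a WeakCell for its map (via Map::WeakCellForMap(), whose cache moves onto Map itself later in this file), while every other type is stored as-is. Paired with the GetFieldType() change in objects-inl.h earlier in this patch, the lifecycle looks like this (schematic only, not real call signatures):

    //   store:    field-type slot <- WrapType(type)  // WeakCell if class
    //   read:     GetFieldType(i) unwraps the cell
    //   map died: cell cleared, so GetFieldType(i) == HeapType::None(),
    //             and GeneralizeFieldType() below then widens to Any
    //             rather than trusting the stale None.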
MaybeHandle<Map> Map::CopyWithField(Handle<Map> map,
Handle<Name> name,
Handle<HeapType> type,
@@ -1720,7 +1730,10 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map,
type = HeapType::Any(isolate);
}
- DataDescriptor new_field_desc(name, index, type, attributes, representation);
+ Handle<Object> wrapped_type(WrapType(type));
+
+ DataDescriptor new_field_desc(name, index, wrapped_type, attributes,
+ representation);
Handle<Map> new_map = Map::CopyAddDescriptor(map, &new_field_desc, flag);
int unused_property_fields = new_map->unused_property_fields() - 1;
if (unused_property_fields < 0) {
@@ -1754,23 +1767,27 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
DCHECK(!object->HasFastProperties());
Isolate* isolate = object->GetIsolate();
Handle<NameDictionary> dict(object->property_dictionary());
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kInvalid);
if (object->IsGlobalObject()) {
- // In case name is an orphaned property reuse the cell.
int entry = dict->FindEntry(name);
+ // If there's a cell there, just invalidate and set the property.
if (entry != NameDictionary::kNotFound) {
- Handle<PropertyCell> cell(PropertyCell::cast(dict->ValueAt(entry)));
- PropertyCell::SetValueInferType(cell, value);
- // Assign an enumeration index to the property and update
- // SetNextEnumerationIndex.
+ PropertyCell::UpdateCell(dict, entry, value, details);
+ // TODO(dcarney): move this to UpdateCell.
+ // Need to adjust the details.
int index = dict->NextEnumerationIndex();
- PropertyDetails details(attributes, DATA, index);
dict->SetNextEnumerationIndex(index + 1);
- dict->SetEntry(entry, name, cell, details);
+ details = dict->DetailsAt(entry).set_index(index);
+ dict->DetailsAtPut(entry, details);
return;
}
- value = isolate->factory()->NewPropertyCell(value);
+ auto cell = isolate->factory()->NewPropertyCell();
+ cell->set_value(*value);
+ auto cell_type = value->IsUndefined() ? PropertyCellType::kUndefined
+ : PropertyCellType::kConstant;
+ details = details.set_cell_type(cell_type);
+ value = cell;
}
- PropertyDetails details(attributes, DATA, 0);
Handle<NameDictionary> result =
NameDictionary::Add(dict, name, value, details);
if (*dict != *result) object->set_properties(*result);
@@ -1778,7 +1795,7 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
Context* JSObject::GetCreationContext() {
- Object* constructor = this->map()->constructor();
+ Object* constructor = this->map()->GetConstructor();
JSFunction* function;
if (!constructor->IsJSFunction()) {
// Functions have null as a constructor,
@@ -1881,7 +1898,8 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
old_map->GetHeap()->empty_descriptor_array(),
LayoutDescriptor::FastPointerLayout());
// Ensure that no transition was inserted for prototype migrations.
- DCHECK(!old_map->HasTransitionArray());
+ DCHECK_EQ(0, TransitionArray::NumberOfTransitions(
+ old_map->raw_transitions()));
DCHECK(new_map->GetBackPointer()->IsUndefined());
}
} else {
@@ -2167,11 +2185,10 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(
void Map::DeprecateTransitionTree() {
if (is_deprecated()) return;
- if (HasTransitionArray()) {
- TransitionArray* transitions = this->transitions();
- for (int i = 0; i < transitions->number_of_transitions(); i++) {
- transitions->GetTarget(i)->DeprecateTransitionTree();
- }
+ Object* transitions = raw_transitions();
+ int num_transitions = TransitionArray::NumberOfTransitions(transitions);
+ for (int i = 0; i < num_transitions; ++i) {
+ TransitionArray::GetTarget(transitions, i)->DeprecateTransitionTree();
}
deprecate();
dependent_code()->DeoptimizeDependentCodeGroup(
@@ -2196,13 +2213,11 @@ bool Map::DeprecateTarget(PropertyKind kind, Name* key,
DescriptorArray* new_descriptors,
LayoutDescriptor* new_layout_descriptor) {
bool transition_target_deprecated = false;
- if (HasTransitionArray()) {
- TransitionArray* transitions = this->transitions();
- int transition = transitions->Search(kind, key, attributes);
- if (transition != TransitionArray::kNotFound) {
- transitions->GetTarget(transition)->DeprecateTransitionTree();
- transition_target_deprecated = true;
- }
+ Map* maybe_transition =
+ TransitionArray::SearchTransition(this, kind, key, attributes);
+ if (maybe_transition != NULL) {
+ maybe_transition->DeprecateTransitionTree();
+ transition_target_deprecated = true;
}
// Don't overwrite the empty descriptor array.
@@ -2245,15 +2260,11 @@ Map* Map::FindLastMatchMap(int verbatim,
Map* current = this;
for (int i = verbatim; i < length; i++) {
- if (!current->HasTransitionArray()) break;
Name* name = descriptors->GetKey(i);
PropertyDetails details = descriptors->GetDetails(i);
- TransitionArray* transitions = current->transitions();
- int transition =
- transitions->Search(details.kind(), name, details.attributes());
- if (transition == TransitionArray::kNotFound) break;
-
- Map* next = transitions->GetTarget(transition);
+ Map* next = TransitionArray::SearchTransition(current, details.kind(), name,
+ details.attributes());
+ if (next == NULL) break;
DescriptorArray* next_descriptors = next->instance_descriptors();
PropertyDetails next_details = next_descriptors->GetDetails(i);
@@ -2296,25 +2307,26 @@ Map* Map::FindFieldOwner(int descriptor) {
void Map::UpdateFieldType(int descriptor, Handle<Name> name,
Representation new_representation,
- Handle<HeapType> new_type) {
+ Handle<Object> new_wrapped_type) {
+ DCHECK(new_wrapped_type->IsSmi() || new_wrapped_type->IsWeakCell());
DisallowHeapAllocation no_allocation;
PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
if (details.type() != DATA) return;
- if (HasTransitionArray()) {
- TransitionArray* transitions = this->transitions();
- for (int i = 0; i < transitions->number_of_transitions(); ++i) {
- transitions->GetTarget(i)
- ->UpdateFieldType(descriptor, name, new_representation, new_type);
- }
+ Object* transitions = raw_transitions();
+ int num_transitions = TransitionArray::NumberOfTransitions(transitions);
+ for (int i = 0; i < num_transitions; ++i) {
+ Map* target = TransitionArray::GetTarget(transitions, i);
+ target->UpdateFieldType(descriptor, name, new_representation,
+ new_wrapped_type);
}
// It is allowed to change representation here only from None to something.
DCHECK(details.representation().Equals(new_representation) ||
details.representation().IsNone());
// Skip if already updated the shared descriptor.
- if (instance_descriptors()->GetFieldType(descriptor) == *new_type) return;
+ if (instance_descriptors()->GetValue(descriptor) == *new_wrapped_type) return;
DataDescriptor d(name, instance_descriptors()->GetFieldIndex(descriptor),
- new_type, details.attributes(), new_representation);
+ new_wrapped_type, details.attributes(), new_representation);
instance_descriptors()->Replace(descriptor, &d);
}
@@ -2355,15 +2367,24 @@ void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
Handle<DescriptorArray> descriptors(
field_owner->instance_descriptors(), isolate);
DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
-
- // Determine the generalized new field type.
- new_field_type = Map::GeneralizeFieldType(
- old_field_type, new_field_type, isolate);
+ bool old_field_type_was_cleared =
+ old_field_type->Is(HeapType::None()) && old_representation.IsHeapObject();
+
+ // Determine the generalized new field type. Conservatively assume type Any
+ // for cleared field types because the cleared type could have been a
+ // deprecated map and there still could be live instances with a non-
+ // deprecated version of the map.
+ new_field_type =
+ old_field_type_was_cleared
+ ? HeapType::Any(isolate)
+ : Map::GeneralizeFieldType(old_field_type, new_field_type, isolate);
PropertyDetails details = descriptors->GetDetails(modify_index);
Handle<Name> name(descriptors->GetKey(modify_index));
+
+ Handle<Object> wrapped_type(WrapType(new_field_type));
field_owner->UpdateFieldType(modify_index, name, new_representation,
- new_field_type);
+ wrapped_type);
field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
isolate, DependentCode::kFieldTypeGroup);
@@ -2543,10 +2564,11 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
next_attributes = old_details.attributes();
next_representation = old_details.representation();
}
- int j = target_map->SearchTransition(next_kind, old_descriptors->GetKey(i),
- next_attributes);
- if (j == TransitionArray::kNotFound) break;
- Handle<Map> tmp_map(target_map->GetTransition(j), isolate);
+ Map* transition = TransitionArray::SearchTransition(
+ *target_map, next_kind, old_descriptors->GetKey(i), next_attributes);
+ if (transition == NULL) break;
+ Handle<Map> tmp_map(transition, isolate);
+
Handle<DescriptorArray> tmp_descriptors = handle(
tmp_map->instance_descriptors(), isolate);
@@ -2631,10 +2653,10 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
next_kind = old_details.kind();
next_attributes = old_details.attributes();
}
- int j = target_map->SearchTransition(next_kind, old_descriptors->GetKey(i),
- next_attributes);
- if (j == TransitionArray::kNotFound) break;
- Handle<Map> tmp_map(target_map->GetTransition(j), isolate);
+ Map* transition = TransitionArray::SearchTransition(
+ *target_map, next_kind, old_descriptors->GetKey(i), next_attributes);
+ if (transition == NULL) break;
+ Handle<Map> tmp_map(transition, isolate);
Handle<DescriptorArray> tmp_descriptors(
tmp_map->instance_descriptors(), isolate);
@@ -2755,7 +2777,8 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
next_field_type =
GeneralizeFieldType(target_field_type, old_field_type, isolate);
}
- DataDescriptor d(target_key, current_offset, next_field_type,
+ Handle<Object> wrapped_type(WrapType(next_field_type));
+ DataDescriptor d(target_key, current_offset, wrapped_type,
next_attributes, next_representation);
current_offset += d.GetDetails().field_width_in_words();
new_descriptors->Set(i, &d);
@@ -2823,8 +2846,10 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
next_field_type = old_field_type;
}
- DataDescriptor d(old_key, current_offset, next_field_type,
- next_attributes, next_representation);
+ Handle<Object> wrapped_type(WrapType(next_field_type));
+
+ DataDescriptor d(old_key, current_offset, wrapped_type, next_attributes,
+ next_representation);
current_offset += d.GetDetails().field_width_in_words();
new_descriptors->Set(i, &d);
} else {
@@ -2869,7 +2894,8 @@ Handle<Map> Map::ReconfigureProperty(Handle<Map> old_map, int modify_index,
// If |transition_target_deprecated| is true then the transition array
// already contains entry for given descriptor. This means that the transition
// could be inserted regardless of whether transitions array is full or not.
- if (!transition_target_deprecated && !split_map->CanHaveMoreTransitions()) {
+ if (!transition_target_deprecated &&
+ !TransitionArray::CanHaveMoreTransitions(split_map)) {
return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
new_kind, new_attributes,
"GenAll_CantHaveMoreTransitions");
@@ -2925,30 +2951,7 @@ Handle<Map> Map::GeneralizeAllFieldRepresentations(
// static
-MaybeHandle<Map> Map::TryUpdate(Handle<Map> map) {
- Handle<Map> proto_map(map);
- while (proto_map->prototype()->IsJSObject()) {
- Handle<JSObject> holder(JSObject::cast(proto_map->prototype()));
- proto_map = Handle<Map>(holder->map());
- if (proto_map->is_deprecated() && JSObject::TryMigrateInstance(holder)) {
- proto_map = Handle<Map>(holder->map());
- }
- }
- return TryUpdateInternal(map);
-}
-
-
-// static
-Handle<Map> Map::Update(Handle<Map> map) {
- if (!map->is_deprecated()) return map;
- return ReconfigureProperty(map, -1, kData, NONE, Representation::None(),
- HeapType::None(map->GetIsolate()),
- ALLOW_IN_DESCRIPTOR);
-}
-
-
-// static
-MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
+MaybeHandle<Map> Map::TryUpdate(Handle<Map> old_map) {
DisallowHeapAllocation no_allocation;
DisallowDeoptimization no_deoptimization(old_map->GetIsolate());
@@ -2965,11 +2968,11 @@ MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
Map* new_map = root_map;
for (int i = root_nof; i < old_nof; ++i) {
PropertyDetails old_details = old_descriptors->GetDetails(i);
- int j = new_map->SearchTransition(old_details.kind(),
- old_descriptors->GetKey(i),
- old_details.attributes());
- if (j == TransitionArray::kNotFound) return MaybeHandle<Map>();
- new_map = new_map->GetTransition(j);
+ Map* transition = TransitionArray::SearchTransition(
+ new_map, old_details.kind(), old_descriptors->GetKey(i),
+ old_details.attributes());
+ if (transition == NULL) return MaybeHandle<Map>();
+ new_map = transition;
DescriptorArray* new_descriptors = new_map->instance_descriptors();
PropertyDetails new_details = new_descriptors->GetDetails(i);
@@ -2978,33 +2981,41 @@ MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
if (!old_details.representation().fits_into(new_details.representation())) {
return MaybeHandle<Map>();
}
- Object* new_value = new_descriptors->GetValue(i);
- Object* old_value = old_descriptors->GetValue(i);
switch (new_details.type()) {
case DATA: {
- PropertyType old_type = old_details.type();
- if (old_type == DATA) {
- if (!HeapType::cast(old_value)->NowIs(HeapType::cast(new_value))) {
+ HeapType* new_type = new_descriptors->GetFieldType(i);
+ PropertyType old_property_type = old_details.type();
+ if (old_property_type == DATA) {
+ HeapType* old_type = old_descriptors->GetFieldType(i);
+ if (!old_type->NowIs(new_type)) {
return MaybeHandle<Map>();
}
} else {
- DCHECK(old_type == DATA_CONSTANT);
- if (!HeapType::cast(new_value)->NowContains(old_value)) {
+ DCHECK(old_property_type == DATA_CONSTANT);
+ Object* old_value = old_descriptors->GetValue(i);
+ if (!new_type->NowContains(old_value)) {
return MaybeHandle<Map>();
}
}
break;
}
- case ACCESSOR:
- DCHECK(HeapType::Any()->Is(HeapType::cast(new_value)));
+ case ACCESSOR: {
+#ifdef DEBUG
+ HeapType* new_type = new_descriptors->GetFieldType(i);
+ DCHECK(HeapType::Any()->Is(new_type));
+#endif
break;
+ }
case DATA_CONSTANT:
- case ACCESSOR_CONSTANT:
+ case ACCESSOR_CONSTANT: {
+ Object* old_value = old_descriptors->GetValue(i);
+ Object* new_value = new_descriptors->GetValue(i);
if (old_details.location() == kField || old_value != new_value) {
return MaybeHandle<Map>();
}
break;
+ }
}
}
if (new_map->NumberOfOwnDescriptors() != old_nof) return MaybeHandle<Map>();
@@ -3012,6 +3023,15 @@ MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
}
+// static
+Handle<Map> Map::Update(Handle<Map> map) {
+ if (!map->is_deprecated()) return map;
+ return ReconfigureProperty(map, -1, kData, NONE, Representation::None(),
+ HeapType::None(map->GetIsolate()),
+ ALLOW_IN_DESCRIPTOR);
+}
+
+
MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
Handle<Object> value) {
Handle<Name> name = it->name();
@@ -3068,7 +3088,7 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
// TODO(verwaest): Remove the distinction. This is mostly bogus since we
// don't know whether we'll want to fetch attributes or call a setter
// until we find the property.
- if (it->HasAccess(v8::ACCESS_SET)) break;
+ if (it->HasAccess()) break;
return JSObject::SetPropertyWithFailedAccessCheck(it, value,
language_mode);
@@ -3099,9 +3119,9 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
Maybe<PropertyAttributes> maybe_attributes =
JSObject::GetPropertyAttributesWithInterceptor(
it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
- if (!maybe_attributes.has_value) return MaybeHandle<Object>();
- done = maybe_attributes.value != ABSENT;
- if (done && (maybe_attributes.value & READ_ONLY) != 0) {
+ if (!maybe_attributes.IsJust()) return MaybeHandle<Object>();
+ done = maybe_attributes.FromJust() != ABSENT;
+ if (done && (maybe_attributes.FromJust() & READ_ONLY) != 0) {
return WriteToReadOnlyProperty(it, value, language_mode);
}
}
@@ -3115,6 +3135,10 @@ MaybeHandle<Object> Object::SetPropertyInternal(LookupIterator* it,
it->GetHolder<JSObject>(),
it->GetAccessors(), language_mode);
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ done = true;
+ break;
+
case LookupIterator::DATA:
if (it->property_details().IsReadOnly()) {
return WriteToReadOnlyProperty(it, value, language_mode);
@@ -3177,6 +3201,9 @@ MaybeHandle<Object> Object::SetSuperProperty(LookupIterator* it,
return JSObject::AddDataProperty(&own_lookup, value, NONE, language_mode,
store_mode);
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return result;
+
case LookupIterator::DATA: {
PropertyDetails details = own_lookup.property_details();
if (details.IsConfigurable() || !details.IsReadOnly()) {
@@ -3306,16 +3333,14 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
return WriteToReadOnlyProperty(it, value, language_mode);
}
+ if (it->state() == LookupIterator::INTEGER_INDEXED_EXOTIC) return value;
+
Handle<JSObject> receiver = it->GetStoreTarget();
// If the receiver is a JSGlobalProxy, store on the prototype (JSGlobalObject)
// instead. If the prototype is Null, the proxy is detached.
if (receiver->IsJSGlobalProxy()) return value;
- // If the receiver is Indexed Exotic object (currently only typed arrays),
- // disallow adding properties with numeric names.
- if (it->IsSpecialNumericIndex()) return value;
-
// Possibly migrate to the most up-to-date map that will be able to store
// |value| under it->name() with |attributes|.
it->PrepareTransitionToDataProperty(value, attributes, store_mode);
@@ -3370,9 +3395,9 @@ MaybeHandle<Object> JSObject::SetElementWithCallbackSetterInPrototypes(
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
if (js_proto->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(js_proto, index, v8::ACCESS_SET)) {
+ if (!isolate->MayAccess(js_proto)) {
*found = true;
- isolate->ReportFailedAccessCheck(js_proto, v8::ACCESS_SET);
+ isolate->ReportFailedAccessCheck(js_proto);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return MaybeHandle<Object>();
}
@@ -3594,9 +3619,9 @@ static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
// have the cached transition.
if (IsExternalArrayElementsKind(to_kind) &&
!IsFixedTypedArrayElementsKind(map->elements_kind())) {
- if (map->HasElementsTransition()) {
- Map* next_map = map->elements_transition_map();
- if (next_map->elements_kind() == to_kind) return next_map;
+ Map* next_map = map->ElementsTransitionMap();
+ if (next_map != NULL && next_map->elements_kind() == to_kind) {
+ return next_map;
}
return map;
}
@@ -3604,13 +3629,14 @@ static Map* FindClosestElementsTransition(Map* map, ElementsKind to_kind) {
ElementsKind kind = map->elements_kind();
while (kind != target_kind) {
kind = GetNextTransitionElementsKind(kind);
- if (!current_map->HasElementsTransition()) return current_map;
- current_map = current_map->elements_transition_map();
+ Map* next_map = current_map->ElementsTransitionMap();
+ if (next_map == NULL) return current_map;
+ current_map = next_map;
}
- if (to_kind != kind && current_map->HasElementsTransition()) {
+ Map* next_map = current_map->ElementsTransitionMap();
+ if (to_kind != kind && next_map != NULL) {
DCHECK(to_kind == DICTIONARY_ELEMENTS);
- Map* next_map = current_map->elements_transition_map();
if (next_map->elements_kind() == to_kind) return next_map;
}
@@ -3642,15 +3668,11 @@ bool Map::IsMapInArrayPrototypeChain() {
Handle<WeakCell> Map::WeakCellForMap(Handle<Map> map) {
Isolate* isolate = map->GetIsolate();
- if (map->code_cache()->IsFixedArray()) {
- return isolate->factory()->NewWeakCell(map);
- }
- Handle<CodeCache> code_cache(CodeCache::cast(map->code_cache()), isolate);
- if (code_cache->weak_cell_cache()->IsWeakCell()) {
- return Handle<WeakCell>(WeakCell::cast(code_cache->weak_cell_cache()));
+ if (map->weak_cell_cache()->IsWeakCell()) {
+ return Handle<WeakCell>(WeakCell::cast(map->weak_cell_cache()));
}
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(map);
- code_cache->set_weak_cell_cache(*weak_cell);
+ map->set_weak_cell_cache(*weak_cell);
return weak_cell;
}
@@ -3760,16 +3782,16 @@ Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy,
Isolate* isolate = proxy->GetIsolate();
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return maybe(false);
+ if (name->IsSymbol()) return Just(false);
Handle<Object> args[] = { name };
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, result, CallTrap(proxy, "has", isolate->derived_has_trap(),
arraysize(args), args),
- Maybe<bool>());
+ Nothing<bool>());
- return maybe(result->BooleanValue());
+ return Just(result->BooleanValue());
}
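// Many hunks below migrate from the old Maybe fields (has_value/value and
// the maybe() helper) to the Just()/Nothing()/IsJust()/FromJust() API from
// include/v8.h. A minimal sketch; this wrapper function is hypothetical:
Maybe<bool> HasPropertySketch(bool trap_ok, bool found) {
  if (!trap_ok) return Nothing<bool>();  // failure: nothing to unwrap
  return Just(found);                    // success: wrap the result
}
// Callers test IsJust() before unwrapping with FromJust().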
@@ -3936,17 +3958,16 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
HandleScope scope(isolate);
// TODO(rossberg): adjust once there is a story for symbols vs proxies.
- if (name->IsSymbol()) return maybe(ABSENT);
+ if (name->IsSymbol()) return Just(ABSENT);
Handle<Object> args[] = { name };
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, result,
- proxy->CallTrap(proxy, "getPropertyDescriptor", Handle<Object>(),
- arraysize(args), args),
- Maybe<PropertyAttributes>());
+ isolate, result, proxy->CallTrap(proxy, "getPropertyDescriptor",
+ Handle<Object>(), arraysize(args), args),
+ Nothing<PropertyAttributes>());
- if (result->IsUndefined()) return maybe(ABSENT);
+ if (result->IsUndefined()) return Just(ABSENT);
Handle<Object> argv[] = { result };
Handle<Object> desc;
@@ -3954,7 +3975,7 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
isolate, desc,
Execution::Call(isolate, isolate->to_complete_property_descriptor(),
result, arraysize(argv), argv),
- Maybe<PropertyAttributes>());
+ Nothing<PropertyAttributes>());
// Convert result to PropertyAttributes.
Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
@@ -3962,26 +3983,26 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
Handle<Object> enumerable;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, enumerable,
Object::GetProperty(desc, enum_n),
- Maybe<PropertyAttributes>());
+ Nothing<PropertyAttributes>());
Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("configurable_"));
Handle<Object> configurable;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, configurable,
Object::GetProperty(desc, conf_n),
- Maybe<PropertyAttributes>());
+ Nothing<PropertyAttributes>());
Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("writable_"));
Handle<Object> writable;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, writable,
Object::GetProperty(desc, writ_n),
- Maybe<PropertyAttributes>());
+ Nothing<PropertyAttributes>());
if (!writable->BooleanValue()) {
Handle<String> set_n = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("set_"));
Handle<Object> setter;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, setter,
Object::GetProperty(desc, set_n),
- Maybe<PropertyAttributes>());
+ Nothing<PropertyAttributes>());
writable = isolate->factory()->ToBoolean(!setter->IsUndefined());
}
@@ -3990,18 +4011,17 @@ Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
Handle<String> trap = isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("getPropertyDescriptor"));
Handle<Object> args[] = { handler, trap, name };
- Handle<Object> error;
- MaybeHandle<Object> maybe_error = isolate->factory()->NewTypeError(
+ Handle<Object> error = isolate->factory()->NewTypeError(
"proxy_prop_not_configurable", HandleVector(args, arraysize(args)));
- if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
- return maybe(NONE);
+ isolate->Throw(*error);
+ return Just(NONE);
}
int attributes = NONE;
if (!enumerable->BooleanValue()) attributes |= DONT_ENUM;
if (!configurable->BooleanValue()) attributes |= DONT_DELETE;
if (!writable->BooleanValue()) attributes |= READ_ONLY;
- return maybe(static_cast<PropertyAttributes>(attributes));
+ return Just(static_cast<PropertyAttributes>(attributes));
}
@@ -4114,31 +4134,6 @@ bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
}
-void JSObject::WriteToField(int descriptor, Object* value) {
- DisallowHeapAllocation no_gc;
-
- DescriptorArray* desc = map()->instance_descriptors();
- PropertyDetails details = desc->GetDetails(descriptor);
-
- DCHECK(details.type() == DATA);
-
- FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
- if (details.representation().IsDouble()) {
- // Nothing more to be done.
- if (value->IsUninitialized()) return;
- if (IsUnboxedDoubleField(index)) {
- RawFastDoublePropertyAtPut(index, value->Number());
- } else {
- HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
- DCHECK(box->IsMutableHeapNumber());
- box->set_value(value->Number());
- }
- } else {
- RawFastPropertyAtPut(index, value);
- }
-}
-
-
void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
Handle<Object> value,
PropertyAttributes attributes) {
@@ -4149,7 +4144,7 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
DCHECK(!object->IsJSProxy());
DCHECK(!name->AsArrayIndex(&index));
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
- DCHECK(maybe.has_value);
+ DCHECK(maybe.IsJust());
DCHECK(!it.IsFound());
DCHECK(object->map()->is_extensible() ||
it.isolate()->IsInternallyUsedPropertyName(name));
@@ -4173,6 +4168,9 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
!it.isolate()->IsInternallyUsedPropertyName(name);
for (; it.IsFound(); it.Next()) {
switch (it.state()) {
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return value;
+
case LookupIterator::INTERCEPTOR:
case LookupIterator::JSPROXY:
case LookupIterator::NOT_FOUND:
@@ -4180,7 +4178,7 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- if (!it.isolate()->MayNamedAccess(object, name, v8::ACCESS_SET)) {
+ if (!it.isolate()->MayAccess(object)) {
return SetPropertyWithFailedAccessCheck(&it, value, SLOPPY);
}
break;
@@ -4288,7 +4286,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
- return maybe(ABSENT);
+ return Just(ABSENT);
}
PropertyCallbackArguments args(
isolate, interceptor->data(), *receiver, *holder);
@@ -4301,7 +4299,7 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
v8::Handle<v8::Integer> result = args.Call(query, v8::Utils::ToLocal(name));
if (!result.IsEmpty()) {
DCHECK(result->IsInt32());
- return maybe(static_cast<PropertyAttributes>(result->Int32Value()));
+ return Just(static_cast<PropertyAttributes>(result->Int32Value()));
}
} else if (!interceptor->getter()->IsUndefined()) {
v8::GenericNamedPropertyGetterCallback getter =
@@ -4310,11 +4308,11 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name));
v8::Handle<v8::Value> result = args.Call(getter, v8::Utils::ToLocal(name));
- if (!result.IsEmpty()) return maybe(DONT_ENUM);
+ if (!result.IsEmpty()) return Just(DONT_ENUM);
}
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>());
- return maybe(ABSENT);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+ return Just(ABSENT);
}
@@ -4344,19 +4342,21 @@ Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
Maybe<PropertyAttributes> result =
JSObject::GetPropertyAttributesWithInterceptor(
it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
- if (!result.has_value) return result;
- if (result.value != ABSENT) return result;
+ if (!result.IsJust()) return result;
+ if (result.FromJust() != ABSENT) return result;
break;
}
case LookupIterator::ACCESS_CHECK:
- if (it->HasAccess(v8::ACCESS_HAS)) break;
+ if (it->HasAccess()) break;
return JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return Just(ABSENT);
case LookupIterator::ACCESSOR:
case LookupIterator::DATA:
- return maybe(it->property_details().attributes());
+ return Just(it->property_details().attributes());
}
}
- return maybe(ABSENT);
+ return Just(ABSENT);
}
@@ -4367,7 +4367,7 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeWithReceiver(
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) {
+ if (!isolate->MayAccess(object)) {
return GetElementAttributesWithFailedAccessCheck(isolate, object,
receiver, index);
}
@@ -4375,7 +4375,7 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeWithReceiver(
if (object->IsJSGlobalProxy()) {
PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return maybe(ABSENT);
+ if (iter.IsAtEnd()) return Just(ABSENT);
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
return JSObject::GetElementAttributeWithReceiver(
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
@@ -4405,8 +4405,9 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeWithInterceptor(
Maybe<PropertyAttributes> from_interceptor =
GetElementAttributeFromInterceptor(object, receiver, index);
- if (!from_interceptor.has_value) return Maybe<PropertyAttributes>();
- if (from_interceptor.value != ABSENT) return maybe(from_interceptor.value);
+ if (!from_interceptor.IsJust()) return Nothing<PropertyAttributes>();
+ if (from_interceptor.FromJust() != ABSENT)
+ return Just(from_interceptor.FromJust());
return GetElementAttributeWithoutInterceptor(object, receiver, index,
check_prototype);
@@ -4428,7 +4429,7 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeFromInterceptor(
ApiIndexedPropertyAccess("interceptor-indexed-has", *object, index));
v8::Handle<v8::Integer> result = args.Call(query, index);
if (!result.IsEmpty())
- return maybe(static_cast<PropertyAttributes>(result->Int32Value()));
+ return Just(static_cast<PropertyAttributes>(result->Int32Value()));
} else if (!interceptor->getter()->IsUndefined()) {
v8::IndexedPropertyGetterCallback getter =
v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
@@ -4436,10 +4437,10 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeFromInterceptor(
ApiIndexedPropertyAccess(
"interceptor-indexed-get-has", *object, index));
v8::Handle<v8::Value> result = args.Call(getter, index);
- if (!result.IsEmpty()) return maybe(NONE);
+ if (!result.IsEmpty()) return Just(NONE);
}
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>());
- return maybe(ABSENT);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<PropertyAttributes>());
+ return Just(ABSENT);
}
@@ -4448,14 +4449,14 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeWithoutInterceptor(
bool check_prototype) {
PropertyAttributes attr =
object->GetElementsAccessor()->GetAttributes(object, index);
- if (attr != ABSENT) return maybe(attr);
+ if (attr != ABSENT) return Just(attr);
// Handle [] on String objects.
if (object->IsStringObjectWithCharacterAt(index)) {
- return maybe(static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE));
+ return Just(static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE));
}
- if (!check_prototype) return maybe(ABSENT);
+ if (!check_prototype) return Just(ABSENT);
PrototypeIterator iter(object->GetIsolate(), object);
if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
@@ -4464,7 +4465,7 @@ Maybe<PropertyAttributes> JSObject::GetElementAttributeWithoutInterceptor(
Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
index);
}
- if (iter.IsAtEnd()) return maybe(ABSENT);
+ if (iter.IsAtEnd()) return Just(ABSENT);
return GetElementAttributeWithReceiver(
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
index, true);
@@ -4557,7 +4558,8 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
switch (details.type()) {
case DATA_CONSTANT: {
Handle<Object> value(descs->GetConstant(i), isolate);
- PropertyDetails d(details.attributes(), DATA, i + 1);
+ PropertyDetails d(details.attributes(), DATA, i + 1,
+ PropertyCellType::kInvalid);
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
@@ -4575,20 +4577,23 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
value = isolate->factory()->NewHeapNumber(old->value());
}
}
- PropertyDetails d(details.attributes(), DATA, i + 1);
+ PropertyDetails d(details.attributes(), DATA, i + 1,
+ PropertyCellType::kInvalid);
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
case ACCESSOR: {
FieldIndex index = FieldIndex::ForDescriptor(*map, i);
Handle<Object> value(object->RawFastPropertyAt(index), isolate);
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+ PropertyCellType::kInvalid);
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
case ACCESSOR_CONSTANT: {
Handle<Object> value(descs->GetCallbacksObject(i), isolate);
- PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1);
+ PropertyDetails d(details.attributes(), ACCESSOR_CONSTANT, i + 1,
+ PropertyCellType::kInvalid);
dictionary = NameDictionary::Add(dictionary, key, value, d);
break;
}
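// PropertyDetails now carries a PropertyCellType as a fourth constructor
// argument: kInvalid for entries that do not describe a global property
// cell, with kMutable and kDeleted used elsewhere in this patch. Minimal
// sketch:
PropertyDetails d(NONE, DATA, 0, PropertyCellType::kInvalid);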
@@ -4821,7 +4826,7 @@ static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
value = handle(Handle<FixedArray>::cast(array)->get(i), isolate);
}
if (!value->IsTheHole()) {
- PropertyDetails details(NONE, DATA, 0);
+ PropertyDetails details = PropertyDetails::Empty();
dictionary =
SeededNumberDictionary::AddNumberEntry(dictionary, i, value, details);
}
@@ -5073,8 +5078,8 @@ bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
LookupIterator it(object, hidden, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
// Cannot get an exception since the hidden_string isn't accessible to JS.
- DCHECK(maybe.has_value);
- return maybe.value != ABSENT;
+ DCHECK(maybe.IsJust());
+ return maybe.FromJust() != ABSENT;
}
@@ -5217,9 +5222,8 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
Factory* factory = isolate->factory();
// Check access rights if needed.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(object, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_DELETE);
+ if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ isolate->ReportFailedAccessCheck(object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->false_value();
}
@@ -5250,8 +5254,8 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
bool should_enqueue_change_record = false;
if (object->map()->is_observed()) {
Maybe<bool> maybe = HasOwnElement(object, index);
- if (!maybe.has_value) return MaybeHandle<Object>();
- should_enqueue_change_record = maybe.value;
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ should_enqueue_change_record = maybe.FromJust();
if (should_enqueue_change_record) {
if (!GetOwnElementAccessorPair(object, index).is_null()) {
old_value = Handle<Object>::cast(factory->the_hole_value());
@@ -5275,8 +5279,8 @@ MaybeHandle<Object> JSObject::DeleteElement(Handle<JSObject> object,
if (should_enqueue_change_record) {
Maybe<bool> maybe = HasOwnElement(object, index);
- if (!maybe.has_value) return MaybeHandle<Object>();
- if (!maybe.value) {
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ if (!maybe.FromJust()) {
Handle<String> name = factory->Uint32ToString(index);
RETURN_ON_EXCEPTION(
isolate, EnqueueChangeRecord(object, "delete", name, old_value),
@@ -5296,14 +5300,13 @@ void JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
int entry = dictionary->FindEntry(name);
DCHECK_NE(NameDictionary::kNotFound, entry);
- // If we have a global object set the cell to the hole.
+ // If we have a global object, invalidate the cell and swap in a new one.
if (object->IsGlobalObject()) {
- PropertyDetails details = dictionary->DetailsAt(entry);
- DCHECK(details.IsConfigurable());
- Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
- Handle<Object> value = isolate->factory()->the_hole_value();
- PropertyCell::SetValueInferType(cell, value);
- dictionary->DetailsAtPut(entry, details.AsDeleted());
+ auto cell = PropertyCell::InvalidateEntry(dictionary, entry);
+ cell->set_value(isolate->heap()->the_hole_value());
+ // TODO(dcarney): InvalidateForDelete
+ dictionary->DetailsAtPut(entry, dictionary->DetailsAt(entry).set_cell_type(
+ PropertyCellType::kDeleted));
return;
}
@@ -5338,9 +5341,8 @@ MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
case LookupIterator::TRANSITION:
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- if (it.HasAccess(v8::ACCESS_DELETE)) break;
- it.isolate()->ReportFailedAccessCheck(it.GetHolder<JSObject>(),
- v8::ACCESS_DELETE);
+ if (it.HasAccess()) break;
+ it.isolate()->ReportFailedAccessCheck(it.GetHolder<JSObject>());
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it.isolate(), Object);
return it.isolate()->factory()->false_value();
case LookupIterator::INTERCEPTOR: {
@@ -5353,6 +5355,8 @@ MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
if (it.isolate()->has_pending_exception()) return maybe_result;
break;
}
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return it.isolate()->factory()->true_value();
case LookupIterator::DATA:
if (is_observed) {
old_value = it.GetDataValue();
@@ -5454,7 +5458,7 @@ bool JSObject::ReferencesObject(Object* obj) {
DisallowHeapAllocation no_allocation;
// Is the object the constructor for this object?
- if (map_of_this->constructor() == obj) {
+ if (map_of_this->GetConstructor() == obj) {
return true;
}
@@ -5518,7 +5522,7 @@ bool JSObject::ReferencesObject(Object* obj) {
Map* arguments_map =
heap->isolate()->context()->native_context()->sloppy_arguments_map();
JSFunction* arguments_function =
- JSFunction::cast(arguments_map->constructor());
+ JSFunction::cast(arguments_map->GetConstructor());
// Get the context and don't check if it is the native context.
JSFunction* f = JSFunction::cast(this);
@@ -5533,7 +5537,7 @@ bool JSObject::ReferencesObject(Object* obj) {
if (context->get(i)->IsJSObject()) {
JSObject* ctxobj = JSObject::cast(context->get(i));
// If it is an arguments array check the content.
- if (ctxobj->map()->constructor() == arguments_function) {
+ if (ctxobj->map()->GetConstructor() == arguments_function) {
if (ctxobj->ReferencesObject(obj)) {
return true;
}
@@ -5547,7 +5551,7 @@ bool JSObject::ReferencesObject(Object* obj) {
if (context->has_extension() && !context->IsCatchContext()) {
// With harmony scoping, a JSFunction may have a global context.
// TODO(mvstanton): walk into the ScopeInfo.
- if (FLAG_harmony_scoping && context->IsScriptContext()) {
+ if (context->IsScriptContext()) {
return false;
}
@@ -5569,10 +5573,8 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
Isolate* isolate = object->GetIsolate();
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(
- object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS);
+ if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ isolate->ReportFailedAccessCheck(object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5680,10 +5682,8 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
DCHECK(!object->map()->is_observed());
Isolate* isolate = object->GetIsolate();
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(
- object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS);
+ if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ isolate->ReportFailedAccessCheck(object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->false_value();
}
@@ -5721,13 +5721,15 @@ MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
}
Handle<Map> old_map(object->map(), isolate);
- int transition_index = old_map->SearchSpecialTransition(*transition_marker);
- if (transition_index != TransitionArray::kNotFound) {
- Handle<Map> transition_map(old_map->GetTransition(transition_index));
+ Map* transition =
+ TransitionArray::SearchSpecial(*old_map, *transition_marker);
+ if (transition != NULL) {
+ Handle<Map> transition_map(transition, isolate);
DCHECK(transition_map->has_dictionary_elements());
DCHECK(!transition_map->is_extensible());
JSObject::MigrateToMap(object, transition_map);
- } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
+ } else if (object->HasFastProperties() &&
+ TransitionArray::CanHaveMoreTransitions(old_map)) {
// Create a new descriptor array with the appropriate property attributes
Handle<Map> new_map = Map::CopyForPreventExtensions(
old_map, attrs, transition_marker, "CopyForPreventExtensions");
@@ -5786,12 +5788,13 @@ void JSObject::SetObserved(Handle<JSObject> object) {
Handle<Map> new_map;
Handle<Map> old_map(object->map(), isolate);
DCHECK(!old_map->is_observed());
- int transition_index =
- old_map->SearchSpecialTransition(isolate->heap()->observed_symbol());
- if (transition_index != TransitionArray::kNotFound) {
- new_map = handle(old_map->GetTransition(transition_index), isolate);
+ Map* transition = TransitionArray::SearchSpecial(
+ *old_map, isolate->heap()->observed_symbol());
+ if (transition != NULL) {
+ new_map = handle(transition, isolate);
DCHECK(new_map->is_observed());
- } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
+ } else if (object->HasFastProperties() &&
+ TransitionArray::CanHaveMoreTransitions(old_map)) {
new_map = Map::CopyForObserved(old_map);
} else {
new_map = Map::Copy(old_map, "SlowObserved");
@@ -5932,8 +5935,8 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
Handle<String> key_string(String::cast(names->get(i)));
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnPropertyAttributes(copy, key_string);
- DCHECK(maybe.has_value);
- PropertyAttributes attributes = maybe.value;
+ DCHECK(maybe.IsJust());
+ PropertyAttributes attributes = maybe.FromJust();
// Only deep copy fields from the object literal expression.
// In particular, don't try to copy the length attribute of
// an array.
@@ -6240,12 +6243,12 @@ static Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object,
return storage;
} else {
Handle<NameDictionary> dictionary(object->property_dictionary());
- int length = dictionary->NumberOfEnumElements();
+ int length = dictionary->NumberOfEnumElements(*object);
if (length == 0) {
return Handle<FixedArray>(isolate->heap()->empty_fixed_array());
}
Handle<FixedArray> storage = isolate->factory()->NewFixedArray(length);
- dictionary->CopyEnumKeysTo(*storage);
+ dictionary->CopyEnumKeysTo(*object, *storage);
return storage;
}
}
@@ -6257,7 +6260,7 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
Isolate* isolate = object->GetIsolate();
Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
Handle<JSFunction> arguments_function(
- JSFunction::cast(isolate->sloppy_arguments_map()->constructor()));
+ JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
// Only collect keys if access is permitted.
for (PrototypeIterator iter(isolate, object,
@@ -6288,10 +6291,8 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
// Check access rights if required.
- if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(
- current, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(current, v8::ACCESS_KEYS);
+ if (current->IsAccessCheckNeeded() && !isolate->MayAccess(current)) {
+ isolate->ReportFailedAccessCheck(current);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, FixedArray);
break;
}
@@ -6330,11 +6331,9 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
// array or dictionary. So the fast inline test for whether to
// use the cache says yes, so we should not create a cache.
bool cache_enum_keys =
- ((current->map()->constructor() != *arguments_function) &&
- !current->IsJSValue() &&
- !current->IsAccessCheckNeeded() &&
- !current->HasNamedInterceptor() &&
- !current->HasIndexedInterceptor());
+ ((current->map()->GetConstructor() != *arguments_function) &&
+ !current->IsJSValue() && !current->IsAccessCheckNeeded() &&
+ !current->HasNamedInterceptor() && !current->HasIndexedInterceptor());
// Compute the property keys and cache them if possible.
ASSIGN_RETURN_ON_EXCEPTION(
isolate, content,
@@ -6380,7 +6379,8 @@ static bool UpdateGetterSetterInDictionary(
DCHECK(details.IsConfigurable());
if (details.attributes() != attributes) {
dictionary->DetailsAtPut(
- entry, PropertyDetails(attributes, ACCESSOR_CONSTANT, index));
+ entry, PropertyDetails(attributes, ACCESSOR_CONSTANT, index,
+ PropertyCellType::kInvalid));
}
AccessorPair::cast(result)->SetComponents(getter, setter);
return true;
@@ -6482,7 +6482,8 @@ void JSObject::SetElementCallback(Handle<JSObject> object,
Handle<Object> structure,
PropertyAttributes attributes) {
Heap* heap = object->GetHeap();
- PropertyDetails details = PropertyDetails(attributes, ACCESSOR_CONSTANT, 0);
+ PropertyDetails details = PropertyDetails(attributes, ACCESSOR_CONSTANT, 0,
+ PropertyCellType::kInvalid);
// Normalize elements to make this operation simple.
bool had_dictionary_elements = object->HasDictionaryElements();
@@ -6527,28 +6528,10 @@ void JSObject::SetPropertyCallback(Handle<JSObject> object,
// Normalize object to make this operation simple.
NormalizeProperties(object, mode, 0, "SetPropertyCallback");
- // For the global object allocate a new map to invalidate the global inline
- // caches which have a global property cell reference directly in the code.
- if (object->IsGlobalObject()) {
- Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
- DCHECK(new_map->is_dictionary_map());
-#if TRACE_MAPS
- if (FLAG_trace_maps) {
- PrintF("[TraceMaps: GlobalPropertyCallback from= %p to= %p ]\n",
- reinterpret_cast<void*>(object->map()),
- reinterpret_cast<void*>(*new_map));
- }
-#endif
- JSObject::MigrateToMap(object, new_map);
-
- // When running crankshaft, changing the map is not enough. We
- // need to deoptimize all functions that rely on this global
- // object.
- Deoptimizer::DeoptimizeGlobalObject(*object);
- }
// Update the dictionary with the new ACCESSOR_CONSTANT property.
- PropertyDetails details = PropertyDetails(attributes, ACCESSOR_CONSTANT, 0);
+ PropertyDetails details = PropertyDetails(attributes, ACCESSOR_CONSTANT, 0,
+ PropertyCellType::kMutable);
SetNormalizedProperty(object, name, structure, details);
ReoptimizeIfPrototype(object);
@@ -6562,9 +6545,8 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
PropertyAttributes attributes) {
Isolate* isolate = object->GetIsolate();
// Check access rights if needed.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
+ if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ isolate->ReportFailedAccessCheck(object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
@@ -6597,18 +6579,18 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
Maybe<bool> maybe = HasOwnElement(object, index);
// Workaround for a GCC 4.4.3 bug which leads to "‘preexists’ may be used
// uninitialized in this function".
- if (!maybe.has_value) {
+ if (!maybe.IsJust()) {
DCHECK(false);
return isolate->factory()->undefined_value();
}
- preexists = maybe.value;
+ preexists = maybe.FromJust();
if (preexists && GetOwnElementAccessorPair(object, index).is_null()) {
old_value =
Object::GetElement(isolate, object, index).ToHandleChecked();
}
} else {
LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- CHECK(GetPropertyAttributes(&it).has_value);
+ CHECK(GetPropertyAttributes(&it).IsJust());
preexists = it.IsFound();
if (preexists && (it.state() == LookupIterator::DATA ||
it.GetAccessors()->IsAccessorInfo())) {
@@ -6656,9 +6638,8 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
Handle<Name> name(Name::cast(info->name()));
// Check access rights if needed.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
+ if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ isolate->ReportFailedAccessCheck(object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return factory->undefined_value();
}
@@ -6715,7 +6696,7 @@ MaybeHandle<Object> JSObject::SetAccessor(Handle<JSObject> object,
} else {
// Lookup the name.
LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
- CHECK(GetPropertyAttributes(&it).has_value);
+ CHECK(GetPropertyAttributes(&it).IsJust());
// ES5 forbids turning a property into an accessor if it's not
// configurable. See 8.6.1 (Table 5).
if (it.IsFound() && (it.IsReadOnly() || !it.IsConfigurable())) {
@@ -6747,10 +6728,8 @@ MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
Handle<Object> current = PrototypeIterator::GetCurrent(iter);
// Check access rights if needed.
if (current->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(Handle<JSObject>::cast(current), name,
- v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(current),
- v8::ACCESS_HAS);
+ !isolate->MayAccess(Handle<JSObject>::cast(current))) {
+ isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(current));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
@@ -6781,15 +6760,16 @@ MaybeHandle<Object> JSObject::GetAccessor(Handle<JSObject> object,
UNREACHABLE();
case LookupIterator::ACCESS_CHECK:
- if (it.HasAccess(v8::ACCESS_HAS)) continue;
- isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>(),
- v8::ACCESS_HAS);
+ if (it.HasAccess()) continue;
+ isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>());
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
case LookupIterator::JSPROXY:
return isolate->factory()->undefined_value();
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
+ return isolate->factory()->undefined_value();
case LookupIterator::DATA:
continue;
case LookupIterator::ACCESSOR: {
@@ -6850,7 +6830,7 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
Handle<Map> result = map->GetIsolate()->factory()->NewMap(
map->instance_type(), instance_size);
result->SetPrototype(handle(map->prototype(), map->GetIsolate()));
- result->set_constructor(map->constructor());
+ result->set_constructor_or_backpointer(map->GetConstructor());
result->set_bit_field(map->bit_field());
result->set_bit_field2(map->bit_field2());
int new_bit_field3 = map->bit_field3();
@@ -6888,7 +6868,7 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
if (FLAG_enable_slow_asserts) {
// The cached map should match newly created normalized map bit-by-bit,
// except for the code cache, which can contain some ics which can be
- // applied to the shared map.
+ // applied to the shared map, plus the dependent code and weak cell cache.
Handle<Map> fresh = Map::CopyNormalized(fast_map, mode);
DCHECK(memcmp(fresh->address(),
@@ -6896,7 +6876,9 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
Map::kCodeCacheOffset) == 0);
STATIC_ASSERT(Map::kDependentCodeOffset ==
Map::kCodeCacheOffset + kPointerSize);
- int offset = Map::kDependentCodeOffset + kPointerSize;
+ STATIC_ASSERT(Map::kWeakCellCacheOffset ==
+ Map::kDependentCodeOffset + kPointerSize);
+ int offset = Map::kWeakCellCacheOffset + kPointerSize;
DCHECK(memcmp(fresh->address() + offset,
new_map->address() + offset,
Map::kSize - offset) == 0);
@@ -7017,11 +6999,12 @@ void Map::TraceTransition(const char* what, Map* from, Map* to, Name* name) {
// static
void Map::TraceAllTransitions(Map* map) {
- if (!map->HasTransitionArray()) return;
- TransitionArray* transitions = map->transitions();
- for (int i = 0; i < transitions->number_of_transitions(); ++i) {
- Map* target = transitions->GetTarget(i);
- Map::TraceTransition("Transition", map, target, transitions->GetKey(i));
+ Object* transitions = map->raw_transitions();
+ int num_transitions = TransitionArray::NumberOfTransitions(transitions);
+ for (int i = 0; i < num_transitions; ++i) {
+ Map* target = TransitionArray::GetTarget(transitions, i);
+ Name* key = TransitionArray::GetKey(transitions, i);
+ Map::TraceTransition("Transition", map, target, key);
Map::TraceAllTransitions(target);
}
}
@@ -7038,13 +7021,7 @@ void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
Map::TraceTransition("NoTransition", *parent, *child, *name);
#endif
} else {
- Handle<TransitionArray> transitions =
- TransitionArray::Insert(parent, name, child, flag);
- if (!parent->HasTransitionArray() ||
- *transitions != parent->transitions()) {
- parent->set_transitions(*transitions);
- }
- child->SetBackPointer(*parent);
+ TransitionArray::Insert(parent, name, child, flag);
if (child->prototype()->IsJSObject()) {
Handle<JSObject> proto(JSObject::cast(child->prototype()));
if (!child->ShouldRegisterAsPrototypeUser(proto)) {
@@ -7068,7 +7045,8 @@ Handle<Map> Map::CopyReplaceDescriptors(
Handle<Map> result = CopyDropDescriptors(map);
if (!map->is_prototype_map()) {
- if (flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()) {
+ if (flag == INSERT_TRANSITION &&
+ TransitionArray::CanHaveMoreTransitions(map)) {
result->InitializeDescriptors(*descriptors, *layout_descriptor);
Handle<Name> name;
@@ -7092,7 +7070,8 @@ Handle<Map> Map::CopyReplaceDescriptors(
if (FLAG_trace_maps &&
// Mirror conditions above that did not call ConnectTransition().
(map->is_prototype_map() ||
- !(flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()))) {
+ !(flag == INSERT_TRANSITION &&
+ TransitionArray::CanHaveMoreTransitions(map)))) {
PrintF("[TraceMaps: ReplaceDescriptors from= %p to= %p reason= %s ]\n",
reinterpret_cast<void*>(*map), reinterpret_cast<void*>(*result),
reason);
@@ -7150,22 +7129,24 @@ Handle<Map> Map::CopyInstallDescriptors(
Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
TransitionFlag flag) {
+ Map* maybe_elements_transition_map = NULL;
if (flag == INSERT_TRANSITION) {
- DCHECK(!map->HasElementsTransition() ||
- ((map->elements_transition_map()->elements_kind() ==
- DICTIONARY_ELEMENTS ||
+ maybe_elements_transition_map = map->ElementsTransitionMap();
+ DCHECK(
+ maybe_elements_transition_map == NULL ||
+ ((maybe_elements_transition_map->elements_kind() ==
+ DICTIONARY_ELEMENTS ||
IsExternalArrayElementsKind(
- map->elements_transition_map()->elements_kind())) &&
- (kind == DICTIONARY_ELEMENTS ||
- IsExternalArrayElementsKind(kind))));
+ maybe_elements_transition_map->elements_kind())) &&
+ (kind == DICTIONARY_ELEMENTS || IsExternalArrayElementsKind(kind))));
DCHECK(!IsFastElementsKind(kind) ||
IsMoreGeneralElementsKindTransition(map->elements_kind(), kind));
DCHECK(kind != map->elements_kind());
}
bool insert_transition = flag == INSERT_TRANSITION &&
- map->CanHaveMoreTransitions() &&
- !map->HasElementsTransition();
+ TransitionArray::CanHaveMoreTransitions(map) &&
+ maybe_elements_transition_map == NULL;
if (insert_transition) {
Handle<Map> new_map = CopyForTransition(map, "CopyAsElementsKind");
@@ -7189,7 +7170,7 @@ Handle<Map> Map::CopyForObserved(Handle<Map> map) {
Isolate* isolate = map->GetIsolate();
bool insert_transition =
- map->CanHaveMoreTransitions() && !map->is_prototype_map();
+ TransitionArray::CanHaveMoreTransitions(map) && !map->is_prototype_map();
if (insert_transition) {
Handle<Map> new_map = CopyForTransition(map, "CopyForObserved");
@@ -7355,9 +7336,10 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
// Migrate to the newest map before storing the property.
map = Update(map);
- int index = map->SearchTransition(kData, *name, attributes);
- if (index != TransitionArray::kNotFound) {
- Handle<Map> transition(map->GetTransition(index));
+ Map* maybe_transition =
+ TransitionArray::SearchTransition(*map, kData, *name, attributes);
+ if (maybe_transition != NULL) {
+ Handle<Map> transition(maybe_transition);
int descriptor = transition->LastAdded();
DCHECK_EQ(attributes, transition->instance_descriptors()
@@ -7446,9 +7428,10 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
? KEEP_INOBJECT_PROPERTIES
: CLEAR_INOBJECT_PROPERTIES;
- int index = map->SearchTransition(kAccessor, *name, attributes);
- if (index != TransitionArray::kNotFound) {
- Handle<Map> transition(map->GetTransition(index));
+ Map* maybe_transition =
+ TransitionArray::SearchTransition(*map, kAccessor, *name, attributes);
+ if (maybe_transition != NULL) {
+ Handle<Map> transition(maybe_transition, isolate);
DescriptorArray* descriptors = transition->instance_descriptors();
int descriptor = transition->LastAdded();
DCHECK(descriptors->GetKey(descriptor)->Equals(*name));
@@ -7520,9 +7503,8 @@ Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
// Ensure the key is unique.
descriptor->KeyToUniqueName();
- if (flag == INSERT_TRANSITION &&
- map->owns_descriptors() &&
- map->CanHaveMoreTransitions()) {
+ if (flag == INSERT_TRANSITION && map->owns_descriptors() &&
+ TransitionArray::CanHaveMoreTransitions(map)) {
return ShareDescriptor(map, descriptors, descriptor);
}
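// Recurring shape in the transition hunks: lookups move from Map instance
// methods returning an index (kNotFound on miss) to static TransitionArray
// helpers returning a Map* that is NULL on miss, saving the second fetch.
// Hedged sketch of the new calling convention:
Map* target = TransitionArray::SearchTransition(*map, kData, *name, attributes);
if (target != NULL) {
  Handle<Map> transition(target, isolate);  // one lookup, no index round-trip
}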
@@ -7686,207 +7668,6 @@ void Map::RemoveFromCodeCache(Name* name, Code* code, int index) {
}
-// An iterator over all map transitions in a descriptor array, reusing the
-// constructor field of the map while it is running. Negative values in
-// the constructor field indicate an active map transition iteration. The
-// original constructor is restored after iterating over all entries.
-class IntrusiveMapTransitionIterator {
- public:
- IntrusiveMapTransitionIterator(
- Map* map, TransitionArray* transition_array, Object* constructor)
- : map_(map),
- transition_array_(transition_array),
- constructor_(constructor) { }
-
- void StartIfNotStarted() {
- DCHECK(!(*IteratorField())->IsSmi() || IsIterating());
- if (!(*IteratorField())->IsSmi()) {
- DCHECK(*IteratorField() == constructor_);
- *IteratorField() = Smi::FromInt(-1);
- }
- }
-
- bool IsIterating() {
- return (*IteratorField())->IsSmi() &&
- Smi::cast(*IteratorField())->value() < 0;
- }
-
- Map* Next() {
- DCHECK(IsIterating());
- int value = Smi::cast(*IteratorField())->value();
- int index = -value - 1;
- int number_of_transitions = transition_array_->number_of_transitions();
- if (index < number_of_transitions) {
- *IteratorField() = Smi::FromInt(value - 1);
- return transition_array_->GetTarget(index);
- }
-
- *IteratorField() = constructor_;
- return NULL;
- }
-
- private:
- Object** IteratorField() {
- return HeapObject::RawField(map_, Map::kConstructorOffset);
- }
-
- Map* map_;
- TransitionArray* transition_array_;
- Object* constructor_;
-};
-
-
-// An iterator over all prototype transitions, reusing the constructor field
-// of the map while it is running. Positive values in the constructor field
-// indicate an active prototype transition iteration. The original constructor
-// is restored after iterating over all entries.
-class IntrusivePrototypeTransitionIterator {
- public:
- IntrusivePrototypeTransitionIterator(
- Map* map, HeapObject* proto_trans, Object* constructor)
- : map_(map), proto_trans_(proto_trans), constructor_(constructor) { }
-
- void StartIfNotStarted() {
- if (!(*IteratorField())->IsSmi()) {
- DCHECK(*IteratorField() == constructor_);
- *IteratorField() = Smi::FromInt(0);
- }
- }
-
- bool IsIterating() {
- return (*IteratorField())->IsSmi() &&
- Smi::cast(*IteratorField())->value() >= 0;
- }
-
- Map* Next() {
- DCHECK(IsIterating());
- int transitionNumber = Smi::cast(*IteratorField())->value();
- if (transitionNumber < NumberOfTransitions()) {
- *IteratorField() = Smi::FromInt(transitionNumber + 1);
- return GetTransition(transitionNumber);
- }
- *IteratorField() = constructor_;
- return NULL;
- }
-
- private:
- Object** IteratorField() {
- return HeapObject::RawField(map_, Map::kConstructorOffset);
- }
-
- int NumberOfTransitions() {
- FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
- Object* num = proto_trans->get(Map::kProtoTransitionNumberOfEntriesOffset);
- return Smi::cast(num)->value();
- }
-
- Map* GetTransition(int transitionNumber) {
- FixedArray* proto_trans = reinterpret_cast<FixedArray*>(proto_trans_);
- int index = Map::kProtoTransitionHeaderSize + transitionNumber;
- return Map::cast(proto_trans->get(index));
- }
-
- Map* map_;
- HeapObject* proto_trans_;
- Object* constructor_;
-};
-
-
-// To traverse the transition tree iteratively, we have to store two kinds of
-// information in a map: The parent map in the traversal and which children of a
-// node have already been visited. To do this without additional memory, we
-// temporarily reuse two fields with known values:
-//
-// (1) The map of the map temporarily holds the parent, and is restored to the
-// meta map afterwards.
-//
-// (2) The info which children have already been visited depends on which part
-// of the map we currently iterate. We use the constructor field of the
-// map to store the current index. We can do that because the constructor
-// is the same for all involved maps.
-//
-// (a) If we currently follow normal map transitions, we temporarily store
-// the current index in the constructor field, and restore it to the
-// original constructor afterwards. Note that a single descriptor can
-// have 0, 1, or 2 transitions.
-//
-// (b) If we currently follow prototype transitions, we temporarily store
-// the current index in the constructor field, and restore it to the
-// original constructor afterwards.
-//
-// Note that the child iterator is just a concatenation of two iterators: One
-// iterating over map transitions and one iterating over prototype transitions.
-class TraversableMap : public Map {
- public:
- // Record the parent in the traversal within this map. Note that this destroys
- // this map's map!
- void SetParent(TraversableMap* parent) { set_map_no_write_barrier(parent); }
-
- // Reset the current map's map, returning the parent previously stored in it.
- TraversableMap* GetAndResetParent() {
- TraversableMap* old_parent = static_cast<TraversableMap*>(map());
- set_map_no_write_barrier(GetHeap()->meta_map());
- return old_parent;
- }
-
- // If we have an unvisited child map, return that one and advance. If we have
- // none, return NULL and restore the overwritten constructor field.
- TraversableMap* ChildIteratorNext(Object* constructor) {
- if (!HasTransitionArray()) return NULL;
-
- TransitionArray* transition_array = transitions();
- if (transition_array->HasPrototypeTransitions()) {
- HeapObject* proto_transitions =
- transition_array->GetPrototypeTransitions();
- IntrusivePrototypeTransitionIterator proto_iterator(this,
- proto_transitions,
- constructor);
- proto_iterator.StartIfNotStarted();
- if (proto_iterator.IsIterating()) {
- Map* next = proto_iterator.Next();
- if (next != NULL) return static_cast<TraversableMap*>(next);
- }
- }
-
- IntrusiveMapTransitionIterator transition_iterator(this,
- transition_array,
- constructor);
- transition_iterator.StartIfNotStarted();
- if (transition_iterator.IsIterating()) {
- Map* next = transition_iterator.Next();
- if (next != NULL) return static_cast<TraversableMap*>(next);
- }
-
- return NULL;
- }
-};
-
-
-// Traverse the transition tree in postorder without using the C++ stack by
-// doing pointer reversal.
-void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
- // Make sure that we do not allocate in the callback.
- DisallowHeapAllocation no_allocation;
-
- TraversableMap* current = static_cast<TraversableMap*>(this);
- // Get the root constructor here to restore it later when finished iterating
- // over maps.
- Object* root_constructor = constructor();
- while (true) {
- TraversableMap* child = current->ChildIteratorNext(root_constructor);
- if (child != NULL) {
- child->SetParent(current);
- current = child;
- } else {
- TraversableMap* parent = current->GetAndResetParent();
- callback(current, data);
- if (current == this) break;
- current = parent;
- }
- }
-}
-
-
void CodeCache::Update(
Handle<CodeCache> code_cache, Handle<Name> name, Handle<Code> code) {
// The number of monomorphic stubs for normal load/store/call IC's can grow to
@@ -8505,6 +8286,47 @@ Handle<WeakFixedArray> WeakFixedArray::Allocate(
}
+Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj,
+ AddMode mode) {
+ int length = array->Length();
+ array = EnsureSpace(array, length + 1);
+ if (mode == kReloadLengthAfterAllocation) {
+ DCHECK(array->Length() <= length);
+ length = array->Length();
+ }
+ array->Set(length, *obj);
+ array->SetLength(length + 1);
+ return array;
+}
+
+
+Handle<ArrayList> ArrayList::Add(Handle<ArrayList> array, Handle<Object> obj1,
+ Handle<Object> obj2, AddMode mode) {
+ int length = array->Length();
+ array = EnsureSpace(array, length + 2);
+ if (mode == kReloadLengthAfterAllocation) {
+ length = array->Length();
+ }
+ array->Set(length, *obj1);
+ array->Set(length + 1, *obj2);
+ array->SetLength(length + 2);
+ return array;
+}
+
+
+Handle<ArrayList> ArrayList::EnsureSpace(Handle<ArrayList> array, int length) {
+ int capacity = array->length();
+ bool empty = (capacity == 0);
+ if (capacity < kFirstIndex + length) {
+ capacity = kFirstIndex + length;
+ capacity = capacity + Max(capacity / 2, 2);
+ array = Handle<ArrayList>::cast(FixedArray::CopySize(array, capacity));
+ if (empty) array->SetLength(0);
+ }
+ return array;
+}
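// Growth arithmetic in EnsureSpace above: when the backing store is too
// small, the new capacity is needed + max(needed / 2, 2), roughly a 1.5x
// growth factor. Worked example: a list that needs 12 slots (header plus
// payload) allocates 12 + max(6, 2) = 18 slots.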
+
+
Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
int number_of_descriptors,
int slack) {
@@ -8655,6 +8477,36 @@ Handle<DeoptimizationOutputData> DeoptimizationOutputData::New(
}
+int HandlerTable::LookupRange(int pc_offset, int* stack_depth_out) {
+ int innermost_handler = -1, innermost_start = -1;
+ for (int i = 0; i < length(); i += kRangeEntrySize) {
+ int start_offset = Smi::cast(get(i + kRangeStartIndex))->value();
+ int end_offset = Smi::cast(get(i + kRangeEndIndex))->value();
+ int handler_offset = Smi::cast(get(i + kRangeHandlerIndex))->value();
+ int stack_depth = Smi::cast(get(i + kRangeDepthIndex))->value();
+ if (pc_offset > start_offset && pc_offset <= end_offset) {
+ DCHECK_NE(start_offset, innermost_start);
+ if (start_offset < innermost_start) continue;
+ innermost_handler = handler_offset;
+ innermost_start = start_offset;
+ *stack_depth_out = stack_depth;
+ }
+ }
+ return innermost_handler;
+}
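// LookupRange keeps the matching (start, end] range with the largest start,
// i.e. the innermost enclosing try range. Worked example: with ranges
// (0, 20] -> A and (5, 15] -> B, a pc_offset of 10 lies in both, and B wins
// because its start is larger. Hedged usage sketch; |table| and |pc_offset|
// are hypothetical:
int stack_depth = 0;
int handler_offset = table->LookupRange(pc_offset, &stack_depth);
if (handler_offset >= 0) {
  // Dispatch to code start + handler_offset, unwinding to |stack_depth|.
}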
+
+
+// TODO(turbofan): Make sure table is sorted and use binary search.
+int HandlerTable::LookupReturn(int pc_offset) {
+ for (int i = 0; i < length(); i += kReturnEntrySize) {
+ int return_offset = Smi::cast(get(i + kReturnOffsetIndex))->value();
+ int handler_offset = Smi::cast(get(i + kReturnHandlerIndex))->value();
+ if (pc_offset == return_offset) return handler_offset;
+ }
+ return -1;
+}
+
+
#ifdef DEBUG
bool DescriptorArray::IsEqualTo(DescriptorArray* other) {
if (IsEmpty()) return other->IsEmpty();
@@ -9733,7 +9585,7 @@ int Map::Hash() {
// addresses.
// Shift away the tag.
- int hash = ObjectAddressForHashing(constructor()) >> 2;
+ int hash = ObjectAddressForHashing(GetConstructor()) >> 2;
// XOR-ing the prototype and constructor directly yields too many zero bits
// when the two pointers are close (which is fairly common).
@@ -9745,7 +9597,7 @@ int Map::Hash() {
static bool CheckEquivalent(Map* first, Map* second) {
- return first->constructor() == second->constructor() &&
+ return first->GetConstructor() == second->GetConstructor() &&
first->prototype() == second->prototype() &&
first->instance_type() == second->instance_type() &&
first->bit_field() == second->bit_field() &&
@@ -9823,7 +9675,6 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
void JSFunction::MarkForOptimization() {
Isolate* isolate = GetIsolate();
- DCHECK(isolate->use_crankshaft());
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
set_code_no_write_barrier(
@@ -9847,9 +9698,7 @@ void JSFunction::AttemptConcurrentOptimization() {
// recompilation race. This goes away as soon as OSR becomes one-shot.
return;
}
- DCHECK(isolate->use_crankshaft());
DCHECK(!IsInOptimizationQueue());
- DCHECK(is_compiled() || isolate->debug()->has_break_points());
DCHECK(!IsOptimized());
DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
DCHECK(isolate->concurrent_recompilation_enabled());
@@ -10033,11 +9882,65 @@ void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
}
+static void GetMinInobjectSlack(Map* map, void* data) {
+ int slack = map->unused_property_fields();
+ if (*reinterpret_cast<int*>(data) > slack) {
+ *reinterpret_cast<int*>(data) = slack;
+ }
+}
+
+
+static void ShrinkInstanceSize(Map* map, void* data) {
+ int slack = *reinterpret_cast<int*>(data);
+ map->set_inobject_properties(map->inobject_properties() - slack);
+ map->set_unused_property_fields(map->unused_property_fields() - slack);
+ map->set_instance_size(map->instance_size() - slack * kPointerSize);
+
+ // Visitor id might depend on the instance size, recalculate it.
+ map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
+}
+
+
+void JSFunction::CompleteInobjectSlackTracking() {
+ DCHECK(has_initial_map());
+ Map* map = initial_map();
+
+ DCHECK(map->counter() >= Map::kSlackTrackingCounterEnd - 1);
+ map->set_counter(Map::kRetainingCounterStart);
+
+ int slack = map->unused_property_fields();
+ TransitionArray::TraverseTransitionTree(map, &GetMinInobjectSlack, &slack);
+ if (slack != 0) {
+ // Resize the initial map and all maps in its transition tree.
+ TransitionArray::TraverseTransitionTree(map, &ShrinkInstanceSize, &slack);
+ }
+}
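// The traversal above uses C-style callbacks of shape void(Map*, void*):
// one pass computes the minimum slack over the transition tree, a second
// shrinks every map by that amount. A minimal hypothetical callback showing
// the void* accumulator protocol:
static void CountShrinkableMaps(Map* map, void* data) {
  if (map->unused_property_fields() > 0)
    *reinterpret_cast<int*>(data) += 1;  // |data| points at an int counter
}
// int n = 0; TransitionArray::TraverseTransitionTree(map, &CountShrinkableMaps, &n);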
+
+
+static bool PrototypeBenefitsFromNormalization(Handle<JSObject> object) {
+ DisallowHeapAllocation no_gc;
+ if (!object->HasFastProperties()) return false;
+ Map* map = object->map();
+ if (map->is_prototype_map()) return false;
+ DescriptorArray* descriptors = map->instance_descriptors();
+ for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() == kDescriptor) continue;
+ if (details.representation().IsHeapObject() ||
+ details.representation().IsTagged()) {
+ FieldIndex index = FieldIndex::ForDescriptor(map, i);
+ if (object->RawFastPropertyAt(index)->IsJSFunction()) return true;
+ }
+ }
+ return false;
+}
+
+
void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
PrototypeOptimizationMode mode) {
if (object->IsGlobalObject()) return;
if (object->IsJSGlobalProxy()) return;
- if (mode == FAST_PROTOTYPE && !object->map()->is_prototype_map()) {
+ if (mode == FAST_PROTOTYPE && PrototypeBenefitsFromNormalization(object)) {
// First normalize to ensure all JSFunctions are DATA_CONSTANT.
JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
"NormalizeAsPrototype");
@@ -10053,8 +9956,9 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
JSObject::MigrateToMap(object, new_map);
}
- if (object->map()->constructor()->IsJSFunction()) {
- JSFunction* constructor = JSFunction::cast(object->map()->constructor());
+ Object* maybe_constructor = object->map()->GetConstructor();
+ if (maybe_constructor->IsJSFunction()) {
+ JSFunction* constructor = JSFunction::cast(maybe_constructor);
// Replace the pointer to the exact constructor with the Object function
// from the same context if undetectable from JS. This is to avoid keeping
// memory alive unnecessarily.
@@ -10063,7 +9967,7 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
object->GetIsolate()->heap()->Object_string()) {
Context* context = constructor->context()->native_context();
JSFunction* object_function = context->object_function();
- object->map()->set_constructor(object_function);
+ object->map()->SetConstructor(object_function);
}
}
object->map()->set_is_prototype_map(true);
@@ -10161,8 +10065,9 @@ Handle<Object> CacheInitialJSArrayMaps(
i < kFastElementsKindCount; ++i) {
Handle<Map> new_map;
ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- if (current_map->HasElementsTransition()) {
- new_map = handle(current_map->elements_transition_map());
+ Map* maybe_elements_transition = current_map->ElementsTransitionMap();
+ if (maybe_elements_transition != NULL) {
+ new_map = handle(maybe_elements_transition);
DCHECK(new_map->elements_kind() == next_kind);
} else {
new_map = Map::CopyAsElementsKind(
@@ -10224,6 +10129,11 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
// needed. At that point, a new initial map is created and the
// prototype is put into the initial map where it belongs.
function->set_prototype_or_initial_map(*value);
+ if (value->IsJSObject()) {
+ // Optimize as prototype to detach it from its transition tree.
+ JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value),
+ FAST_PROTOTYPE);
+ }
}
isolate->heap()->ClearInstanceofCache();
}
@@ -10245,7 +10155,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Map> new_map = Map::Copy(handle(function->map()), "SetPrototype");
JSObject::MigrateToMap(function, new_map);
- new_map->set_constructor(*value);
+ new_map->SetConstructor(*value);
new_map->set_non_instance_prototype(true);
Isolate* isolate = new_map->GetIsolate();
construct_prototype = handle(
@@ -10288,7 +10198,7 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
map->SetPrototype(prototype, FAST_PROTOTYPE);
}
function->set_prototype_or_initial_map(*map);
- map->set_constructor(*function);
+ map->SetConstructor(*function);
#if TRACE_MAPS
if (FLAG_trace_maps) {
PrintF("[TraceMaps: InitialMap map= %p SFI= %d_%s ]\n",
@@ -10350,11 +10260,6 @@ void JSFunction::PrintName(FILE* out) {
}
-Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) {
- return Context::cast(literals->get(JSFunction::kLiteralNativeContextIndex));
-}
-
-
// The filter is a pattern that matches function names in this way:
// "*" all; the default
// "-" all but the top-level function
@@ -10392,6 +10297,15 @@ bool JSFunction::PassesFilter(const char* raw_filter) {
}
+Handle<String> JSFunction::GetDebugName(Handle<JSFunction> function) {
+ Isolate* isolate = function->GetIsolate();
+ Handle<Object> name =
+ JSObject::GetDataProperty(function, isolate->factory()->name_string());
+ if (name->IsString()) return Handle<String>::cast(name);
+ return handle(function->shared()->DebugName(), isolate);
+}
+
+
void Oddball::Initialize(Isolate* isolate,
Handle<Oddball> oddball,
const char* to_string,
@@ -10520,6 +10434,7 @@ Handle<Object> Script::GetNameOrSourceURL(Handle<Script> script) {
Handle<JSObject> Script::GetWrapper(Handle<Script> script) {
Isolate* isolate = script->GetIsolate();
if (!script->wrapper()->IsUndefined()) {
+ DCHECK(script->wrapper()->IsWeakCell());
Handle<WeakCell> cell(WeakCell::cast(script->wrapper()));
if (!cell->cleared()) {
// Return a handle for the existing script wrapper from the cache.
@@ -10766,41 +10681,6 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
}
-static void GetMinInobjectSlack(Map* map, void* data) {
- int slack = map->unused_property_fields();
- if (*reinterpret_cast<int*>(data) > slack) {
- *reinterpret_cast<int*>(data) = slack;
- }
-}
-
-
-static void ShrinkInstanceSize(Map* map, void* data) {
- int slack = *reinterpret_cast<int*>(data);
- map->set_inobject_properties(map->inobject_properties() - slack);
- map->set_unused_property_fields(map->unused_property_fields() - slack);
- map->set_instance_size(map->instance_size() - slack * kPointerSize);
-
- // Visitor id might depend on the instance size, recalculate it.
- map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
-}
-
-
-void JSFunction::CompleteInobjectSlackTracking() {
- DCHECK(has_initial_map());
- Map* map = initial_map();
-
- DCHECK(map->counter() >= Map::kSlackTrackingCounterEnd - 1);
- map->set_counter(Map::kRetainingCounterStart);
-
- int slack = map->unused_property_fields();
- map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
- if (slack != 0) {
- // Resize the initial map and all maps in its transition tree.
- map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
- }
-}
-
-
int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
BailoutId osr_ast_id) {
DisallowHeapAllocation no_gc;
@@ -10902,7 +10782,7 @@ void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
- Address p = rinfo->target_reference();
+ Address p = rinfo->target_external_reference();
VisitExternalReference(&p);
}
@@ -11411,11 +11291,18 @@ Code* Code::GetCodeAgeStub(Isolate* isolate, Age age, MarkingParity parity) {
}
-void Code::PrintDeoptLocation(FILE* out, int bailout_id) {
- Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, bailout_id);
- if (info.deopt_reason != Deoptimizer::kNoReason || info.raw_position != 0) {
- PrintF(out, " ;;; deoptimize at %d: %s\n", info.raw_position,
- Deoptimizer::GetDeoptReason(info.deopt_reason));
+void Code::PrintDeoptLocation(FILE* out, Address pc) {
+ Deoptimizer::DeoptInfo info = Deoptimizer::GetDeoptInfo(this, pc);
+ class SourcePosition pos = info.position;
+ if (info.deopt_reason != Deoptimizer::kNoReason || !pos.IsUnknown()) {
+ if (FLAG_hydrogen_track_positions) {
+ PrintF(out, " ;;; deoptimize at %d_%d: %s\n",
+ pos.inlining_id(), pos.position(),
+ Deoptimizer::GetDeoptReason(info.deopt_reason));
+ } else {
+ PrintF(out, " ;;; deoptimize at %d: %s\n", pos.raw(),
+ Deoptimizer::GetDeoptReason(info.deopt_reason));
+ }
}
}
@@ -11482,11 +11369,9 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
os << "\n";
}
for (int i = 0; i < deopt_count; i++) {
- // TODO(svenpanne) Add some basic formatting to our streams.
- Vector<char> buf1 = Vector<char>::New(128);
- SNPrintF(buf1, "%6d %6d %6d %6d", i, AstId(i).ToInt(),
- ArgumentsStackHeight(i)->value(), Pc(i)->value());
- os << buf1.start();
+ os << std::setw(6) << i << " " << std::setw(6) << AstId(i).ToInt() << " "
+ << std::setw(6) << ArgumentsStackHeight(i)->value() << " "
+ << std::setw(6) << Pc(i)->value();
if (!FLAG_print_code_verbose) {
os << "\n";
@@ -11507,9 +11392,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(
while (iterator.HasNext() &&
Translation::BEGIN !=
(opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
- Vector<char> buf2 = Vector<char>::New(128);
- SNPrintF(buf2, "%27s %s ", "", Translation::StringFor(opcode));
- os << buf2.start();
+ os << std::setw(31) << " " << Translation::StringFor(opcode) << " ";
switch (opcode) {
case Translation::BEGIN:
@@ -11641,13 +11524,34 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(
os << "ast id pc state\n";
for (int i = 0; i < this->DeoptPoints(); i++) {
int pc_and_state = this->PcAndState(i)->value();
- // TODO(svenpanne) Add some basic formatting to our streams.
- Vector<char> buf = Vector<char>::New(100);
- SNPrintF(buf, "%6d %8d %s\n", this->AstId(i).ToInt(),
- FullCodeGenerator::PcField::decode(pc_and_state),
- FullCodeGenerator::State2String(
- FullCodeGenerator::StateField::decode(pc_and_state)));
- os << buf.start();
+ os << std::setw(6) << this->AstId(i).ToInt() << " " << std::setw(8)
+ << FullCodeGenerator::PcField::decode(pc_and_state) << " "
+ << FullCodeGenerator::State2String(
+ FullCodeGenerator::StateField::decode(pc_and_state)) << "\n";
+ }
+}
+
+
+void HandlerTable::HandlerTableRangePrint(std::ostream& os) {
+ os << " from to hdlr\n";
+ for (int i = 0; i < length(); i += kRangeEntrySize) {
+ int pc_start = Smi::cast(get(i + kRangeStartIndex))->value();
+ int pc_end = Smi::cast(get(i + kRangeEndIndex))->value();
+ int handler = Smi::cast(get(i + kRangeHandlerIndex))->value();
+ int depth = Smi::cast(get(i + kRangeDepthIndex))->value();
+ os << " (" << std::setw(4) << pc_start << "," << std::setw(4) << pc_end
+ << ") -> " << std::setw(4) << handler << " (depth=" << depth << ")\n";
+ }
+}
+
+
+void HandlerTable::HandlerTableReturnPrint(std::ostream& os) {
+ os << " off hdlr\n";
+ for (int i = 0; i < length(); i += kReturnEntrySize) {
+ int pc_offset = Smi::cast(get(i + kReturnOffsetIndex))->value();
+ int handler = Smi::cast(get(i + kReturnHandlerIndex))->value();
+ os << " " << std::setw(4) << pc_offset << " -> " << std::setw(4)
+ << handler << "\n";
}
}
@@ -11755,17 +11659,12 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
for (unsigned i = 0; i < table.length(); i++) {
unsigned pc_offset = table.GetPcOffset(i);
os << static_cast<const void*>(instruction_start() + pc_offset) << " ";
- // TODO(svenpanne) Add some basic formatting to our streams.
- Vector<char> buf1 = Vector<char>::New(30);
- SNPrintF(buf1, "%4d", pc_offset);
- os << buf1.start() << " ";
+ os << std::setw(4) << pc_offset << " ";
table.PrintEntry(i, os);
os << " (sp -> fp) ";
SafepointEntry entry = table.GetEntry(i);
if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
- Vector<char> buf2 = Vector<char>::New(30);
- SNPrintF(buf2, "%6d", entry.deoptimization_index());
- os << buf2.start();
+ os << std::setw(6) << entry.deoptimization_index();
} else {
os << "<none>";
}
@@ -11787,10 +11686,9 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
os << "ast_id pc_offset loop_depth\n";
for (uint32_t i = 0; i < back_edges.length(); i++) {
- Vector<char> buf = Vector<char>::New(100);
- SNPrintF(buf, "%6d %9u %10u\n", back_edges.ast_id(i).ToInt(),
- back_edges.pc_offset(i), back_edges.loop_depth(i));
- os << buf.start();
+ os << std::setw(6) << back_edges.ast_id(i).ToInt() << " "
+ << std::setw(9) << back_edges.pc_offset(i) << " " << std::setw(10)
+ << back_edges.loop_depth(i) << "\n";
}
os << "\n";
@@ -11804,6 +11702,16 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
#endif
}
+ if (handler_table()->length() > 0) {
+ os << "Handler Table (size = " << handler_table()->Size() << ")\n";
+ if (kind() == FUNCTION) {
+ HandlerTable::cast(handler_table())->HandlerTableRangePrint(os);
+ } else if (kind() == OPTIMIZED_FUNCTION) {
+ HandlerTable::cast(handler_table())->HandlerTableReturnPrint(os);
+ }
+ os << "\n";
+ }
+
os << "RelocInfo (size = " << relocation_size() << ")\n";
for (RelocIterator it(this); !it.done(); it.next()) {
it.rinfo()->Print(GetIsolate(), os);
@@ -11948,9 +11856,9 @@ static bool GetOldValue(Isolate* isolate,
List<uint32_t>* indices) {
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnElementAttribute(object, index);
- DCHECK(maybe.has_value);
- DCHECK(maybe.value != ABSENT);
- if (maybe.value == DONT_DELETE) return false;
+ DCHECK(maybe.IsJust());
+ DCHECK(maybe.FromJust() != ABSENT);
+ if (maybe.FromJust() == DONT_DELETE) return false;
Handle<Object> value;
if (!JSObject::GetOwnElementAccessorPair(object, index).is_null()) {
value = Handle<Object>::cast(isolate->factory()->the_hole_value());
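
Several hunks in this patch migrate from the old struct-style Maybe (public has_value/value fields) to the Just()/Nothing()/IsJust()/FromJust() API. A hedged approximation of the shape being assumed; the real definition lives in include/v8.h and adds checks and more helpers:

    template <class T>
    class Maybe {
     public:
      bool IsJust() const { return has_value_; }
      bool IsNothing() const { return !has_value_; }
      T FromJust() const { /* the real one CHECKs IsJust() first */ return value_; }

     private:
      Maybe() : has_value_(false), value_() {}
      explicit Maybe(const T& t) : has_value_(true), value_(t) {}
      bool has_value_;
      T value_;

      template <class U> friend Maybe<U> Nothing();
      template <class U> friend Maybe<U> Just(const U& u);
    };

    template <class T> Maybe<T> Nothing() { return Maybe<T>(); }
    template <class T> Maybe<T> Just(const T& t) { return Maybe<T>(t); }
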
@@ -12113,77 +12021,6 @@ MaybeHandle<Object> JSArray::SetElementsLength(
}
-Handle<Map> Map::GetPrototypeTransition(Handle<Map> map,
- Handle<Object> prototype) {
- DisallowHeapAllocation no_gc;
- FixedArray* cache = map->GetPrototypeTransitions();
- int number_of_transitions = map->NumberOfProtoTransitions();
- for (int i = 0; i < number_of_transitions; i++) {
- Map* map = Map::cast(cache->get(kProtoTransitionHeaderSize + i));
- if (map->prototype() == *prototype) return handle(map);
- }
- return Handle<Map>();
-}
-
-
-Handle<Map> Map::PutPrototypeTransition(Handle<Map> map,
- Handle<Object> prototype,
- Handle<Map> target_map) {
- DCHECK(target_map->IsMap());
- DCHECK(HeapObject::cast(*prototype)->map()->IsMap());
- // Don't cache prototype transition if this map is either shared, or a map of
- // a prototype.
- if (map->is_prototype_map()) return map;
- if (map->is_dictionary_map() || !FLAG_cache_prototype_transitions) return map;
-
- const int header = kProtoTransitionHeaderSize;
-
- Handle<FixedArray> cache(map->GetPrototypeTransitions());
- int capacity = cache->length() - header;
- int transitions = map->NumberOfProtoTransitions() + 1;
-
- if (transitions > capacity) {
- // Grow array by factor 2 up to MaxCachedPrototypeTransitions.
- int new_capacity = Min(kMaxCachedPrototypeTransitions, transitions * 2);
- if (new_capacity == capacity) return map;
-
- cache = FixedArray::CopySize(cache, header + new_capacity);
-
- SetPrototypeTransitions(map, cache);
- }
-
- // Reload number of transitions as GC might shrink them.
- int last = map->NumberOfProtoTransitions();
- int entry = header + last;
-
- cache->set(entry, *target_map);
- map->SetNumberOfProtoTransitions(last + 1);
-
- return map;
-}
-
-
-void Map::ZapTransitions() {
- TransitionArray* transition_array = transitions();
- // TODO(mstarzinger): Temporarily use a slower version instead of the faster
- // MemsetPointer to investigate a crasher. Switch back to MemsetPointer.
- Object** data = transition_array->data_start();
- Object* the_hole = GetHeap()->the_hole_value();
- int length = transition_array->length();
- for (int i = 0; i < length; i++) {
- data[i] = the_hole;
- }
-}
-
-
-void Map::ZapPrototypeTransitions() {
- FixedArray* proto_transitions = GetPrototypeTransitions();
- MemsetPointer(proto_transitions->data_start(),
- GetHeap()->the_hole_value(),
- proto_transitions->length());
-}
-
-
// static
void Map::AddDependentCompilationInfo(Handle<Map> map,
DependentCode::DependencyGroup group,
@@ -12489,10 +12326,10 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
Handle<Object> prototype,
PrototypeOptimizationMode mode) {
- Handle<Map> new_map = GetPrototypeTransition(map, prototype);
+ Handle<Map> new_map = TransitionArray::GetPrototypeTransition(map, prototype);
if (new_map.is_null()) {
new_map = Copy(map, "TransitionToPrototype");
- PutPrototypeTransition(map, prototype, new_map);
+ TransitionArray::PutPrototypeTransition(map, prototype, new_map);
new_map->SetPrototype(prototype, mode);
}
return new_map;
@@ -12948,7 +12785,8 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
element->IsTheHole());
dictionary->UpdateMaxNumberKey(index);
if (set_mode == DEFINE_PROPERTY) {
- details = PropertyDetails(attributes, DATA, details.dictionary_index());
+ details = PropertyDetails(attributes, DATA, details.dictionary_index(),
+ PropertyCellType::kInvalid);
dictionary->DetailsAtPut(entry, details);
}
@@ -12991,7 +12829,7 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
}
}
- PropertyDetails details(attributes, DATA, 0);
+ PropertyDetails details(attributes, DATA, 0, PropertyCellType::kInvalid);
Handle<SeededNumberDictionary> new_dictionary =
SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
details);
@@ -13184,8 +13022,8 @@ MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
+ if (!isolate->MayAccess(object)) {
+ isolate->ReportFailedAccessCheck(object);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return value;
}
@@ -13230,8 +13068,8 @@ MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnElementAttribute(object, index);
- if (!maybe.has_value) return MaybeHandle<Object>();
- PropertyAttributes old_attributes = maybe.value;
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ PropertyAttributes old_attributes = maybe.FromJust();
Handle<Object> old_value = isolate->factory()->the_hole_value();
Handle<Object> old_length_handle;
@@ -13261,8 +13099,8 @@ MaybeHandle<Object> JSObject::SetElement(Handle<JSObject> object,
Handle<String> name = isolate->factory()->Uint32ToString(index);
maybe = GetOwnElementAttribute(object, index);
- if (!maybe.has_value) return MaybeHandle<Object>();
- PropertyAttributes new_attributes = maybe.value;
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ PropertyAttributes new_attributes = maybe.FromJust();
if (old_attributes == ABSENT) {
if (object->IsJSArray() &&
@@ -13960,7 +13798,7 @@ void Dictionary<Derived, Shape, Key>::CopyValuesTo(FixedArray* elements) {
InterceptorInfo* JSObject::GetNamedInterceptor() {
DCHECK(map()->has_named_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->constructor());
+ JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
DCHECK(constructor->shared()->IsApiFunction());
Object* result =
constructor->shared()->get_api_func_data()->named_property_handler();
@@ -13970,7 +13808,7 @@ InterceptorInfo* JSObject::GetNamedInterceptor() {
InterceptorInfo* JSObject::GetIndexedInterceptor() {
DCHECK(map()->has_indexed_interceptor());
- JSFunction* constructor = JSFunction::cast(map()->constructor());
+ JSFunction* constructor = JSFunction::cast(map()->GetConstructor());
DCHECK(constructor->shared()->IsApiFunction());
Object* result =
constructor->shared()->get_api_func_data()->indexed_property_handler();
@@ -14059,8 +13897,8 @@ Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
Handle<Name> key) {
LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
- if (!maybe_result.has_value) return Maybe<bool>();
- return maybe(it.IsFound());
+ if (!maybe_result.IsJust()) return Nothing<bool>();
+ return Just(it.IsFound());
}
@@ -14070,17 +13908,17 @@ Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
HandleScope scope(isolate);
// Check access rights if needed.
if (object->IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
- RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<bool>());
- return maybe(false);
+ if (!isolate->MayAccess(object)) {
+ isolate->ReportFailedAccessCheck(object);
+ RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Nothing<bool>());
+ return Just(false);
}
}
if (object->IsJSGlobalProxy()) {
HandleScope scope(isolate);
PrototypeIterator iter(isolate, object);
- if (iter.IsAtEnd()) return maybe(false);
+ if (iter.IsAtEnd()) return Just(false);
DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
return HasRealElementProperty(
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index);
@@ -14088,8 +13926,7 @@ Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
Maybe<PropertyAttributes> result =
GetElementAttributeWithoutInterceptor(object, object, index, false);
- if (!result.has_value) return Maybe<bool>();
- return maybe(result.value != ABSENT);
+ return result.IsJust() ? Just(result.FromJust() != ABSENT) : Nothing<bool>();
}
@@ -14097,8 +13934,8 @@ Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
Handle<Name> key) {
LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
- if (!maybe_result.has_value) return Maybe<bool>();
- return maybe(it.state() == LookupIterator::ACCESSOR);
+ return maybe_result.IsJust() ? Just(it.state() == LookupIterator::ACCESSOR)
+ : Nothing<bool>();
}
@@ -14112,7 +13949,7 @@ int JSObject::NumberOfOwnProperties(PropertyAttributes filter) {
}
return map->NumberOfDescribedProperties(OWN_DESCRIPTORS, filter);
}
- return property_dictionary()->NumberOfElementsFilterAttributes(filter);
+ return property_dictionary()->NumberOfElementsFilterAttributes(this, filter);
}
@@ -14245,9 +14082,7 @@ void JSObject::GetOwnPropertyNames(
}
}
} else {
- property_dictionary()->CopyKeysTo(storage,
- index,
- filter,
+ property_dictionary()->CopyKeysTo(this, storage, index, filter,
NameDictionary::UNSORTED);
}
}
@@ -14331,11 +14166,13 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
case DICTIONARY_ELEMENTS: {
if (storage != NULL) {
- element_dictionary()->CopyKeysTo(storage,
- filter,
- SeededNumberDictionary::SORTED);
+ element_dictionary()->CopyKeysTo<DictionaryEntryType::kObjects>(
+ storage, filter, SeededNumberDictionary::SORTED);
}
- counter += element_dictionary()->NumberOfElementsFilterAttributes(filter);
+ counter +=
+ element_dictionary()
+ ->NumberOfElementsFilterAttributes<DictionaryEntryType::kObjects>(
+ filter);
break;
}
case SLOPPY_ARGUMENTS_ELEMENTS: {
@@ -14348,10 +14185,11 @@ int JSObject::GetOwnElementKeys(FixedArray* storage,
SeededNumberDictionary* dictionary =
SeededNumberDictionary::cast(arguments);
if (storage != NULL) {
- dictionary->CopyKeysTo(
+ dictionary->CopyKeysTo<DictionaryEntryType::kObjects>(
storage, filter, SeededNumberDictionary::UNSORTED);
}
- counter += dictionary->NumberOfElementsFilterAttributes(filter);
+ counter += dictionary->NumberOfElementsFilterAttributes<
+ DictionaryEntryType::kObjects>(filter);
for (int i = 0; i < mapped_length; ++i) {
if (!parameter_map->get(i + 2)->IsTheHole()) {
if (storage != NULL) storage->set(counter, Smi::FromInt(i));
@@ -14942,15 +14780,6 @@ template Object*
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
SlowReverseLookup(Object* value);
-template void
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- CopyKeysTo(
- FixedArray*,
- PropertyAttributes,
- Dictionary<SeededNumberDictionary,
- SeededNumberDictionaryShape,
- uint32_t>::SortMode);
-
template Handle<Object>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::DeleteProperty(
Handle<NameDictionary>, int);
@@ -14971,18 +14800,6 @@ template Handle<SeededNumberDictionary>
HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
Shrink(Handle<SeededNumberDictionary>, uint32_t);
-template void Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
- CopyKeysTo(
- FixedArray*,
- int,
- PropertyAttributes,
- Dictionary<
- NameDictionary, NameDictionaryShape, Handle<Name> >::SortMode);
-
-template int
-Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
- NumberOfElementsFilterAttributes(PropertyAttributes);
-
template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::Add(
Handle<NameDictionary>, Handle<Name>, Handle<Object>, PropertyDetails);
@@ -14995,10 +14812,6 @@ template Handle<FixedArray> Dictionary<
NameDictionary, NameDictionaryShape,
Handle<Name> >::GenerateNewEnumerationIndices(Handle<NameDictionary>);
-template int
-Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- NumberOfElementsFilterAttributes(PropertyAttributes);
-
template Handle<SeededNumberDictionary>
Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
Add(Handle<SeededNumberDictionary>,
@@ -15025,16 +14838,13 @@ template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
-template
-int Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- NumberOfEnumElements();
-
-template
-int Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
- NumberOfEnumElements();
+template bool
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
+ uint32_t>::HasComplexElements<DictionaryEntryType::kCells>();
-template bool Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::HasComplexElements();
+template bool
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
+ uint32_t>::HasComplexElements<DictionaryEntryType::kObjects>();
template int HashTable<SeededNumberDictionary, SeededNumberDictionaryShape,
uint32_t>::FindEntry(uint32_t);
@@ -15104,7 +14914,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
}
uint32_t result = pos;
- PropertyDetails no_details(NONE, DATA, 0);
+ PropertyDetails no_details = PropertyDetails::Empty();
while (undefs > 0) {
if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
// Adding an entry with the key beyond smi-range requires
@@ -15465,40 +15275,37 @@ Handle<Object> ExternalFloat64Array::SetValue(
void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
Handle<Name> name) {
DCHECK(!global->HasFastProperties());
- Isolate* isolate = global->GetIsolate();
- int entry = global->property_dictionary()->FindEntry(name);
- if (entry != NameDictionary::kNotFound) {
- Handle<PropertyCell> cell(
- PropertyCell::cast(global->property_dictionary()->ValueAt(entry)));
-
- Handle<Object> value(cell->value(), isolate);
- Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell(value);
- global->property_dictionary()->ValueAtPut(entry, *new_cell);
-
- Handle<Object> hole = global->GetIsolate()->factory()->the_hole_value();
- PropertyCell::SetValueInferType(cell, hole);
- }
+ auto dictionary = handle(global->property_dictionary());
+ int entry = dictionary->FindEntry(name);
+ if (entry == NameDictionary::kNotFound) return;
+ PropertyCell::InvalidateEntry(dictionary, entry);
}
+// TODO(dcarney): rename to EnsureEmptyPropertyCell or something.
Handle<PropertyCell> GlobalObject::EnsurePropertyCell(
Handle<GlobalObject> global, Handle<Name> name) {
DCHECK(!global->HasFastProperties());
- int entry = global->property_dictionary()->FindEntry(name);
- if (entry == NameDictionary::kNotFound) {
- Isolate* isolate = global->GetIsolate();
- Handle<PropertyCell> cell = isolate->factory()->NewPropertyCellWithHole();
- PropertyDetails details(NONE, DATA, 0);
- details = details.AsDeleted();
- Handle<NameDictionary> dictionary = NameDictionary::Add(
- handle(global->property_dictionary()), name, cell, details);
- global->set_properties(*dictionary);
+ auto dictionary = handle(global->property_dictionary());
+ int entry = dictionary->FindEntry(name);
+ Handle<PropertyCell> cell;
+ if (entry != NameDictionary::kNotFound) {
+ // This call should be idempotent.
+ DCHECK(dictionary->DetailsAt(entry).cell_type() ==
+ PropertyCellType::kUninitialized ||
+ dictionary->DetailsAt(entry).cell_type() ==
+ PropertyCellType::kDeleted);
+ DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
+ cell = handle(PropertyCell::cast(dictionary->ValueAt(entry)));
+ DCHECK(cell->value()->IsTheHole());
return cell;
- } else {
- Object* value = global->property_dictionary()->ValueAt(entry);
- DCHECK(value->IsPropertyCell());
- return handle(PropertyCell::cast(value));
}
+ Isolate* isolate = global->GetIsolate();
+ cell = isolate->factory()->NewPropertyCell();
+ PropertyDetails details(NONE, DATA, 0, PropertyCellType::kUninitialized);
+ dictionary = NameDictionary::Add(dictionary, name, cell, details);
+ global->set_properties(*dictionary);
+ return cell;
}
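
A hedged usage sketch of the contract above (caller and variable names invented): the cell for a global property is created at most once, and re-lookups hand back the same still-holed cell, so code that embedded it stays valid.

    // Guarantee a (possibly hole-valued) cell for `name` so an IC can embed
    // it; calling this again for the same name returns the identical cell.
    Handle<PropertyCell> cell = GlobalObject::EnsurePropertyCell(global, name);
    if (cell->value()->IsTheHole()) {
      // Property not defined yet; the cell still serves negative lookups.
    }
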
@@ -15908,8 +15715,7 @@ Dictionary<Derived, Shape, Key>::GenerateNewEnumerationIndices(
int enum_index = PropertyDetails::kInitialIndex + i;
PropertyDetails details = dictionary->DetailsAt(index);
- PropertyDetails new_details =
- PropertyDetails(details.attributes(), details.type(), enum_index);
+ PropertyDetails new_details = details.set_index(enum_index);
dictionary->DetailsAtPut(index, new_details);
}
@@ -15962,7 +15768,7 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::AtPut(
#ifdef DEBUG
USE(Shape::AsHandle(dictionary->GetIsolate(), key));
#endif
- PropertyDetails details(NONE, DATA, 0);
+ PropertyDetails details = PropertyDetails::Empty();
AddEntry(dictionary, key, value, details, dictionary->Hash(key));
return dictionary;
@@ -15998,13 +15804,11 @@ void Dictionary<Derived, Shape, Key>::AddEntry(
uint32_t entry = dictionary->FindInsertionEntry(hash);
// Insert element at empty or deleted entry
- if (!details.IsDeleted() &&
- details.dictionary_index() == 0 &&
- Shape::kIsEnumerable) {
+ if (details.dictionary_index() == 0 && Shape::kIsEnumerable) {
// Assign an enumeration index to the property and update
// SetNextEnumerationIndex.
int index = dictionary->NextEnumerationIndex();
- details = PropertyDetails(details.attributes(), details.type(), index);
+ details = details.set_index(index);
dictionary->SetNextEnumerationIndex(index + 1);
}
dictionary->SetEntry(entry, k, value, details);
@@ -16050,7 +15854,7 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::AddNumberEntry(
uint32_t key,
Handle<Object> value) {
SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
- return Add(dictionary, key, value, PropertyDetails(NONE, DATA, 0));
+ return Add(dictionary, key, value, PropertyDetails::Empty());
}
@@ -16081,9 +15885,7 @@ Handle<SeededNumberDictionary> SeededNumberDictionary::Set(
return AddNumberEntry(dictionary, key, value, details);
}
// Preserve enumeration index.
- details = PropertyDetails(details.attributes(),
- details.type(),
- dictionary->DetailsAt(entry).dictionary_index());
+ details = details.set_index(dictionary->DetailsAt(entry).dictionary_index());
Handle<Object> object_key =
SeededNumberDictionaryShape::AsHandle(dictionary->GetIsolate(), key);
dictionary->SetEntry(entry, object_key, value, details);
@@ -16104,8 +15906,22 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::Set(
}
+template <DictionaryEntryType type, typename D>
+static inline bool IsDeleted(D d, int i) {
+ switch (type) {
+ case DictionaryEntryType::kObjects:
+ return false;
+ case DictionaryEntryType::kCells:
+ DCHECK(d->ValueAt(i)->IsPropertyCell());
+ return PropertyCell::cast(d->ValueAt(i))->value()->IsTheHole();
+ }
+ UNREACHABLE();
+ return false;
+}
-template<typename Derived, typename Shape, typename Key>
+
+template <typename Derived, typename Shape, typename Key>
+template <DictionaryEntryType type>
int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
PropertyAttributes filter) {
int capacity = DerivedHashTable::Capacity();
@@ -16113,8 +15929,8 @@ int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
for (int i = 0; i < capacity; i++) {
Object* k = DerivedHashTable::KeyAt(i);
if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
+ if (IsDeleted<type>(this, i)) continue;
PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
if ((attr & filter) == 0) result++;
}
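
The IsDeleted<type>() helper above encodes the key storage difference: ordinary objects keep plain values in their dictionaries, while global objects keep PropertyCells and mark deletion by writing the hole into the cell. A hedged usage sketch of the dispatch this enables (CountLiveEntries is an invented name; it mirrors the holder-based wrappers added to Dictionary in objects.h later in this diff):

    // Pick the entry flavor from the holder, so global-object dictionaries
    // skip logically deleted (holed-out) cells.
    int CountLiveEntries(NameDictionary* dict, Object* holder) {
      return holder->IsGlobalObject()
                 ? dict->NumberOfElementsFilterAttributes<
                       DictionaryEntryType::kCells>(NONE)
                 : dict->NumberOfElementsFilterAttributes<
                       DictionaryEntryType::kObjects>(NONE);
    }
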
@@ -16123,21 +15939,15 @@ int Dictionary<Derived, Shape, Key>::NumberOfElementsFilterAttributes(
}
-template<typename Derived, typename Shape, typename Key>
-int Dictionary<Derived, Shape, Key>::NumberOfEnumElements() {
- return NumberOfElementsFilterAttributes(
- static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC));
-}
-
-
template <typename Derived, typename Shape, typename Key>
+template <DictionaryEntryType type>
bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
int capacity = DerivedHashTable::Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = DerivedHashTable::KeyAt(i);
if (DerivedHashTable::IsKey(k) && !FilterKey(k, NONE)) {
+ if (IsDeleted<type>(this, i)) continue;
PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
if (details.type() == ACCESSOR_CONSTANT) return true;
PropertyAttributes attr = details.attributes();
if (attr & (READ_ONLY | DONT_DELETE | DONT_ENUM)) return true;
@@ -16148,17 +15958,18 @@ bool Dictionary<Derived, Shape, Key>::HasComplexElements() {
template <typename Derived, typename Shape, typename Key>
+template <DictionaryEntryType type>
void Dictionary<Derived, Shape, Key>::CopyKeysTo(
FixedArray* storage, PropertyAttributes filter,
typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
- DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
+ DCHECK(storage->length() >= NumberOfElementsFilterAttributes<type>(filter));
int capacity = DerivedHashTable::Capacity();
int index = 0;
for (int i = 0; i < capacity; i++) {
Object* k = DerivedHashTable::KeyAt(i);
if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
+ if (IsDeleted<type>(this, i)) continue;
PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
if ((attr & filter) == 0) storage->set(index++, k);
}
@@ -16181,6 +15992,7 @@ struct EnumIndexComparator {
};
+template <DictionaryEntryType type>
void NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
int length = storage->length();
int capacity = Capacity();
@@ -16189,7 +16001,7 @@ void NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
Object* k = KeyAt(i);
if (IsKey(k) && !k->IsSymbol()) {
PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted() || details.IsDontEnum()) continue;
+ if (details.IsDontEnum() || IsDeleted<type>(this, i)) continue;
storage->set(properties, Smi::FromInt(i));
properties++;
if (properties == length) break;
@@ -16206,19 +16018,18 @@ void NameDictionary::CopyEnumKeysTo(FixedArray* storage) {
}
-template<typename Derived, typename Shape, typename Key>
+template <typename Derived, typename Shape, typename Key>
+template <DictionaryEntryType type>
void Dictionary<Derived, Shape, Key>::CopyKeysTo(
- FixedArray* storage,
- int index,
- PropertyAttributes filter,
+ FixedArray* storage, int index, PropertyAttributes filter,
typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
- DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
+ DCHECK(storage->length() >= NumberOfElementsFilterAttributes<type>(filter));
int capacity = DerivedHashTable::Capacity();
for (int i = 0; i < capacity; i++) {
Object* k = DerivedHashTable::KeyAt(i);
if (DerivedHashTable::IsKey(k) && !FilterKey(k, filter)) {
+ if (IsDeleted<type>(this, i)) continue;
PropertyDetails details = DetailsAt(i);
- if (details.IsDeleted()) continue;
PropertyAttributes attr = details.attributes();
if ((attr & filter) == 0) storage->set(index++, k);
}
@@ -16238,6 +16049,7 @@ Object* Dictionary<Derived, Shape, Key>::SlowReverseLookup(Object* value) {
Object* k = DerivedHashTable::KeyAt(i);
if (Dictionary::IsKey(k)) {
Object* e = ValueAt(i);
+ // TODO(dcarney): this should be templatized.
if (e->IsPropertyCell()) {
e = PropertyCell::cast(e)->value();
}
@@ -16863,12 +16675,14 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info,
// Get the break point objects for a code position.
-Object* DebugInfo::GetBreakPointObjects(int code_position) {
+Handle<Object> DebugInfo::GetBreakPointObjects(int code_position) {
Object* break_point_info = GetBreakPointInfo(code_position);
if (break_point_info->IsUndefined()) {
- return GetHeap()->undefined_value();
+ return GetIsolate()->factory()->undefined_value();
}
- return BreakPointInfo::cast(break_point_info)->break_point_objects();
+ return Handle<Object>(
+ BreakPointInfo::cast(break_point_info)->break_point_objects(),
+ GetIsolate());
}
@@ -16887,22 +16701,22 @@ int DebugInfo::GetBreakPointCount() {
}
-Object* DebugInfo::FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object) {
- Heap* heap = debug_info->GetHeap();
- if (debug_info->break_points()->IsUndefined()) return heap->undefined_value();
- for (int i = 0; i < debug_info->break_points()->length(); i++) {
- if (!debug_info->break_points()->get(i)->IsUndefined()) {
- Handle<BreakPointInfo> break_point_info =
- Handle<BreakPointInfo>(BreakPointInfo::cast(
- debug_info->break_points()->get(i)));
- if (BreakPointInfo::HasBreakPointObject(break_point_info,
- break_point_object)) {
- return *break_point_info;
+Handle<Object> DebugInfo::FindBreakPointInfo(
+ Handle<DebugInfo> debug_info, Handle<Object> break_point_object) {
+ Isolate* isolate = debug_info->GetIsolate();
+ if (!debug_info->break_points()->IsUndefined()) {
+ for (int i = 0; i < debug_info->break_points()->length(); i++) {
+ if (!debug_info->break_points()->get(i)->IsUndefined()) {
+ Handle<BreakPointInfo> break_point_info = Handle<BreakPointInfo>(
+ BreakPointInfo::cast(debug_info->break_points()->get(i)), isolate);
+ if (BreakPointInfo::HasBreakPointObject(break_point_info,
+ break_point_object)) {
+ return break_point_info;
+ }
}
}
}
- return heap->undefined_value();
+ return isolate->factory()->undefined_value();
}
@@ -17223,8 +17037,15 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
fixed_typed_array->length(), typed_array->type(),
static_cast<uint8_t*>(buffer->backing_store()));
- buffer->set_weak_first_view(*typed_array);
- DCHECK(typed_array->weak_next() == isolate->heap()->undefined_value());
+ Heap* heap = isolate->heap();
+ if (heap->InNewSpace(*typed_array)) {
+ DCHECK(typed_array->weak_next() == isolate->heap()->undefined_value());
+ typed_array->set_weak_next(heap->new_array_buffer_views_list());
+ heap->set_new_array_buffer_views_list(*typed_array);
+ } else {
+ buffer->set_weak_first_view(*typed_array);
+ DCHECK(typed_array->weak_next() == isolate->heap()->undefined_value());
+ }
typed_array->set_buffer(*buffer);
JSObject::SetMapAndElements(typed_array, new_map, new_elements);
@@ -17243,54 +17064,109 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
}
-HeapType* PropertyCell::type() {
- return static_cast<HeapType*>(type_raw());
+Handle<PropertyCell> PropertyCell::InvalidateEntry(
+ Handle<NameDictionary> dictionary, int entry) {
+ Isolate* isolate = dictionary->GetIsolate();
+ // Swap with a copy.
+ DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
+ Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
+ auto new_cell = isolate->factory()->NewPropertyCell();
+ new_cell->set_value(cell->value());
+ dictionary->ValueAtPut(entry, *new_cell);
+ bool is_the_hole = cell->value()->IsTheHole();
+ // Cell is officially mutable henceforth.
+ auto details = dictionary->DetailsAt(entry);
+ details = details.set_cell_type(is_the_hole ? PropertyCellType::kDeleted
+ : PropertyCellType::kMutable);
+ dictionary->DetailsAtPut(entry, details);
+ // Old cell is ready for invalidation.
+ if (is_the_hole) {
+ cell->set_value(isolate->heap()->undefined_value());
+ } else {
+ cell->set_value(isolate->heap()->the_hole_value());
+ }
+ cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kPropertyCellChangedGroup);
+ return new_cell;
}
-void PropertyCell::set_type(HeapType* type, WriteBarrierMode ignored) {
- DCHECK(IsPropertyCell());
- set_type_raw(type, ignored);
+PropertyCellType PropertyCell::UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value,
+ PropertyDetails details) {
+ PropertyCellType type = details.cell_type();
+ DCHECK(!value->IsTheHole());
+ DCHECK_IMPLIES(cell->value()->IsTheHole(),
+ type == PropertyCellType::kUninitialized ||
+ type == PropertyCellType::kDeleted);
+ switch (type) {
+ // Only allow a cell to transition once into constant state.
+ case PropertyCellType::kUninitialized:
+ if (value->IsUndefined()) return PropertyCellType::kUndefined;
+ return PropertyCellType::kConstant;
+ case PropertyCellType::kUndefined:
+ return PropertyCellType::kConstant;
+ case PropertyCellType::kConstant:
+ // No transition.
+ if (*value == cell->value()) return PropertyCellType::kConstant;
+ // Fall through.
+ case PropertyCellType::kMutable:
+ return PropertyCellType::kMutable;
+ }
+ UNREACHABLE();
+ return PropertyCellType::kMutable;
}
-Handle<HeapType> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
- Handle<Object> value) {
- Isolate* isolate = cell->GetIsolate();
- Handle<HeapType> old_type(cell->type(), isolate);
- Handle<HeapType> new_type = HeapType::Constant(value, isolate);
-
- if (new_type->Is(old_type)) return old_type;
-
- cell->dependent_code()->DeoptimizeDependentCodeGroup(
- isolate, DependentCode::kPropertyCellChangedGroup);
-
- if (old_type->Is(HeapType::None()) || old_type->Is(HeapType::Undefined())) {
- return new_type;
+Handle<Object> PropertyCell::UpdateCell(Handle<NameDictionary> dictionary,
+ int entry, Handle<Object> value,
+ PropertyDetails details) {
+ DCHECK(!value->IsTheHole());
+ DCHECK(dictionary->ValueAt(entry)->IsPropertyCell());
+ Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
+ const PropertyDetails original_details = dictionary->DetailsAt(entry);
+  // Data accesses could be cached in ICs or optimized code.
+ bool invalidate =
+ original_details.kind() == kData && details.kind() == kAccessor;
+ int index = original_details.dictionary_index();
+ auto old_type = original_details.cell_type();
+ // Preserve the enumeration index unless the property was deleted or never
+ // initialized.
+ if (cell->value()->IsTheHole()) {
+ index = dictionary->NextEnumerationIndex();
+ dictionary->SetNextEnumerationIndex(index + 1);
+ // Negative lookup cells must be invalidated.
+ invalidate = true;
}
+ DCHECK(index > 0);
+ details = details.set_index(index);
- return HeapType::Any(isolate);
-}
-
-
-Handle<Object> PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
- Handle<Object> value) {
// Heuristic: if a small-ish string is stored in a previously uninitialized
// property cell, internalize it.
const int kMaxLengthForInternalization = 200;
- if ((cell->type()->Is(HeapType::None()) ||
- cell->type()->Is(HeapType::Undefined())) &&
+ if ((old_type == PropertyCellType::kUninitialized ||
+ old_type == PropertyCellType::kUndefined) &&
value->IsString()) {
auto string = Handle<String>::cast(value);
- if (string->length() <= kMaxLengthForInternalization &&
- !string->map()->is_undetectable()) {
+ if (string->length() <= kMaxLengthForInternalization) {
value = cell->GetIsolate()->factory()->InternalizeString(string);
}
}
+
+ auto new_type = UpdatedType(cell, value, original_details);
+ if (invalidate) cell = PropertyCell::InvalidateEntry(dictionary, entry);
+
+ // Install new property details and cell value.
+ details = details.set_cell_type(new_type);
+ dictionary->DetailsAtPut(entry, details);
cell->set_value(*value);
- if (!HeapType::Any()->Is(cell->type())) {
- Handle<HeapType> new_type = UpdatedType(cell, value);
- cell->set_type(*new_type);
+
+ // Deopt when transitioning from a constant type.
+ if (!invalidate && old_type == PropertyCellType::kConstant &&
+ new_type != PropertyCellType::kConstant) {
+ auto isolate = dictionary->GetIsolate();
+ cell->dependent_code()->DeoptimizeDependentCodeGroup(
+ isolate, DependentCode::kPropertyCellChangedGroup);
}
return value;
}
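
Taken together, InvalidateEntry/UpdatedType/UpdateCell replace the old HeapType tracking with a small per-cell state machine. A standalone model of the lattice UpdatedType() implements, runnable outside V8 (the enum mirrors PropertyCellType; heap values are stood in for by plain ints):

    enum class CellType { kUninitialized, kUndefined, kConstant, kMutable };

    // Cells only ever move "down" the lattice, so optimized code may embed a
    // kConstant cell's value and stay valid until the first real change.
    CellType NextType(CellType type, int old_value, int new_value,
                      bool new_is_undefined) {
      switch (type) {
        case CellType::kUninitialized:
          return new_is_undefined ? CellType::kUndefined : CellType::kConstant;
        case CellType::kUndefined:
          return CellType::kConstant;
        case CellType::kConstant:
          if (new_value == old_value) return CellType::kConstant;
          return CellType::kMutable;  // mirrors the fall-through above
        case CellType::kMutable:
          return CellType::kMutable;
      }
      return CellType::kMutable;  // unreachable
    }
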
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index d4af4a6255..5faf62b6ca 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -122,7 +122,7 @@
// - Symbol
// - HeapNumber
// - Cell
-// - PropertyCell
+//   - PropertyCell
// - Code
// - Map
// - Oddball
@@ -662,7 +662,6 @@ enum InstanceType {
CODE_TYPE,
ODDBALL_TYPE,
CELL_TYPE,
- PROPERTY_CELL_TYPE,
// "Data", objects that cannot contain non-map-word pointers to heap
// objects.
@@ -718,6 +717,7 @@ enum InstanceType {
CONSTANT_POOL_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
WEAK_CELL_TYPE,
+ PROPERTY_CELL_TYPE,
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
@@ -938,9 +938,11 @@ template <class C> inline bool Is(Object* obj);
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
V(DependentCode) \
+ V(HandlerTable) \
V(FixedArray) \
V(FixedDoubleArray) \
V(WeakFixedArray) \
+ V(ArrayList) \
V(ConstantPoolArray) \
V(Context) \
V(ScriptContextTable) \
@@ -2080,7 +2082,7 @@ class JSObject: public JSReceiver {
inline void FastPropertyAtPut(FieldIndex index, Object* value);
inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
inline void RawFastDoublePropertyAtPut(FieldIndex index, double value);
- void WriteToField(int descriptor, Object* value);
+ inline void WriteToField(int descriptor, Object* value);
// Access to in object properties.
inline int GetInObjectPropertyOffset(int index);
@@ -2627,6 +2629,34 @@ class WeakFixedArray : public FixedArray {
};
+// A generic array that grows dynamically, with O(1) amortized insertion.
+class ArrayList : public FixedArray {
+ public:
+ enum AddMode {
+ kNone,
+ // Use this if GC can delete elements from the array.
+ kReloadLengthAfterAllocation,
+ };
+ static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj,
+ AddMode mode = kNone);
+ static Handle<ArrayList> Add(Handle<ArrayList> array, Handle<Object> obj1,
+ Handle<Object> obj2, AddMode = kNone);
+ inline int Length();
+ inline void SetLength(int length);
+ inline Object* Get(int index);
+ inline Object** Slot(int index);
+ inline void Set(int index, Object* obj);
+ inline void Clear(int index, Object* undefined);
+ DECLARE_CAST(ArrayList)
+
+ private:
+ static Handle<ArrayList> EnsureSpace(Handle<ArrayList> array, int length);
+ static const int kLengthIndex = 0;
+ static const int kFirstIndex = 1;
+ DISALLOW_IMPLICIT_CONSTRUCTORS(ArrayList);
+};
+
+
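
A minimal standalone model of the "O(1) amortized" claim above, assuming ArrayList::EnsureSpace (whose body is not shown in this hunk) grows the backing FixedArray geometrically, e.g. by doubling:

    #include <cstddef>
    #include <cstdio>

    int main() {
      std::size_t capacity = 1, length = 0, copies = 0;
      for (std::size_t i = 0; i < 1000000; i++) {
        if (length == capacity) {  // EnsureSpace: double capacity, copy over
          copies += length;
          capacity *= 2;
        }
        length++;  // Add: store at index `length`, then SetLength(length)
      }
      // Total copies stay below 2 * length, i.e. O(1) copies per Add.
      std::printf("appends=%zu copies=%zu\n", length, copies);
      return 0;
    }
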
// ConstantPoolArray describes a fixed-sized array containing constant pool
// entries.
//
@@ -3504,6 +3534,9 @@ class StringTable: public HashTable<StringTable,
};
+enum class DictionaryEntryType { kObjects, kCells };
+
+
template <typename Derived, typename Shape, typename Key>
class Dictionary: public HashTable<Derived, Shape, Key> {
protected:
@@ -3532,9 +3565,6 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
this->set(DerivedHashTable::EntryToIndex(entry) + 2, value.AsSmi());
}
- // Sorting support
- void CopyValuesTo(FixedArray* elements);
-
// Delete a property from the dictionary.
static Handle<Object> DeleteProperty(Handle<Derived> dictionary, int entry);
@@ -3545,27 +3575,82 @@ class Dictionary: public HashTable<Derived, Shape, Key> {
return DerivedHashTable::Shrink(dictionary, key);
}
+ // Sorting support
+ // TODO(dcarney): templatize or move to SeededNumberDictionary
+ void CopyValuesTo(FixedArray* elements);
+
// Returns the number of elements in the dictionary filtering out properties
// with the specified attributes.
+ template <DictionaryEntryType type>
int NumberOfElementsFilterAttributes(PropertyAttributes filter);
+ int NumberOfElementsFilterAttributes(Object* holder,
+ PropertyAttributes filter) {
+ if (holder->IsGlobalObject()) {
+ return NumberOfElementsFilterAttributes<DictionaryEntryType::kCells>(
+ filter);
+ } else {
+ return NumberOfElementsFilterAttributes<DictionaryEntryType::kObjects>(
+ filter);
+ }
+ }
// Returns the number of enumerable elements in the dictionary.
- int NumberOfEnumElements();
+ template <DictionaryEntryType type>
+ int NumberOfEnumElements() {
+ return NumberOfElementsFilterAttributes<type>(
+ static_cast<PropertyAttributes>(DONT_ENUM | SYMBOLIC));
+ }
+ int NumberOfEnumElements(Object* holder) {
+ if (holder->IsGlobalObject()) {
+ return NumberOfEnumElements<DictionaryEntryType::kCells>();
+ } else {
+ return NumberOfEnumElements<DictionaryEntryType::kObjects>();
+ }
+ }
// Returns true if the dictionary contains any elements that are non-writable,
// non-configurable, non-enumerable, or have getters/setters.
+ template <DictionaryEntryType type>
bool HasComplexElements();
+ bool HasComplexElements(Object* holder) {
+ if (holder->IsGlobalObject()) {
+ return HasComplexElements<DictionaryEntryType::kCells>();
+ } else {
+ return HasComplexElements<DictionaryEntryType::kObjects>();
+ }
+ }
enum SortMode { UNSORTED, SORTED };
+
// Copies keys to preallocated fixed array.
- void CopyKeysTo(FixedArray* storage,
- PropertyAttributes filter,
+ template <DictionaryEntryType type>
+ void CopyKeysTo(FixedArray* storage, PropertyAttributes filter,
SortMode sort_mode);
+ void CopyKeysTo(Object* holder, FixedArray* storage,
+ PropertyAttributes filter, SortMode sort_mode) {
+ if (holder->IsGlobalObject()) {
+ return CopyKeysTo<DictionaryEntryType::kCells>(storage, filter,
+ sort_mode);
+ } else {
+ return CopyKeysTo<DictionaryEntryType::kObjects>(storage, filter,
+ sort_mode);
+ }
+ }
+
// Fill in details for properties into storage.
- void CopyKeysTo(FixedArray* storage,
- int index,
- PropertyAttributes filter,
+ template <DictionaryEntryType type>
+ void CopyKeysTo(FixedArray* storage, int index, PropertyAttributes filter,
SortMode sort_mode);
+ void CopyKeysTo(Object* holder, FixedArray* storage, int index,
+ PropertyAttributes filter, SortMode sort_mode) {
+ if (holder->IsGlobalObject()) {
+ return CopyKeysTo<DictionaryEntryType::kCells>(storage, index, filter,
+ sort_mode);
+ } else {
+ return CopyKeysTo<DictionaryEntryType::kObjects>(storage, index, filter,
+ sort_mode);
+ }
+ }
// Accessors for next enumeration index.
void SetNextEnumerationIndex(int index) {
@@ -3658,7 +3743,16 @@ class NameDictionary: public Dictionary<NameDictionary,
DECLARE_CAST(NameDictionary)
// Copies enumerable keys to preallocated fixed array.
+ template <DictionaryEntryType type>
void CopyEnumKeysTo(FixedArray* storage);
+ void CopyEnumKeysTo(Object* holder, FixedArray* storage) {
+ if (holder->IsGlobalObject()) {
+ return CopyEnumKeysTo<DictionaryEntryType::kCells>(storage);
+ } else {
+ return CopyEnumKeysTo<DictionaryEntryType::kObjects>(storage);
+ }
+ }
+
inline static Handle<FixedArray> DoGenerateNewEnumerationIndices(
Handle<NameDictionary> dictionary);
@@ -4219,6 +4313,10 @@ class ScopeInfo : public FixedArray {
// exposed to the user in a debugger.
bool LocalIsSynthetic(int var);
+ String* StrongModeFreeVariableName(int var);
+ int StrongModeFreeVariableStartPosition(int var);
+ int StrongModeFreeVariableEndPosition(int var);
+
// Lookup support for serialized scope info. Returns the
// stack slot index for a given slot name if the slot is
// present; otherwise returns a value < 0. The name must be an internalized
@@ -4245,6 +4343,8 @@ class ScopeInfo : public FixedArray {
// must be an internalized string.
int FunctionContextSlotIndex(String* name, VariableMode* mode);
+ bool block_scope_is_class_scope();
+ FunctionKind function_kind();
// Copies all the context locals into an object used to materialize a scope.
static bool CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
@@ -4269,11 +4369,12 @@ class ScopeInfo : public FixedArray {
// 3. The number of non-parameter variables allocated on the stack.
// 4. The number of non-parameter and parameter variables allocated in the
// context.
-#define FOR_EACH_NUMERIC_FIELD(V) \
- V(Flags) \
- V(ParameterCount) \
- V(StackLocalCount) \
- V(ContextLocalCount)
+#define FOR_EACH_NUMERIC_FIELD(V) \
+ V(Flags) \
+ V(ParameterCount) \
+ V(StackLocalCount) \
+ V(ContextLocalCount) \
+ V(StrongModeFreeVariableCount)
#define FIELD_ACCESSORS(name) \
void Set##name(int value) { \
@@ -4320,7 +4421,12 @@ class ScopeInfo : public FixedArray {
// the context locals in ContextLocalNameEntries. One slot is used per
// context local, so in total this part occupies ContextLocalCount()
// slots in the array.
- // 5. FunctionNameEntryIndex:
+ // 5. StrongModeFreeVariableNameEntries:
+ // Stores the names of strong mode free variables.
+ // 6. StrongModeFreeVariablePositionEntries:
+ // Stores the locations (start and end position) of strong mode free
+ // variables.
+ // 7. FunctionNameEntryIndex:
// If the scope belongs to a named function expression this part contains
// information about the function variable. It always occupies two array
// slots: a. The name of the function variable.
@@ -4329,6 +4435,8 @@ class ScopeInfo : public FixedArray {
int StackLocalEntriesIndex();
int ContextLocalNameEntriesIndex();
int ContextLocalInfoEntriesIndex();
+ int StrongModeFreeVariableNameEntriesIndex();
+ int StrongModeFreeVariablePositionEntriesIndex();
int FunctionNameEntryIndex();
// Location of the function variable for named function expressions.
@@ -4350,6 +4458,10 @@ class ScopeInfo : public FixedArray {
class AsmFunctionField : public BitField<bool, 13, 1> {};
class IsSimpleParameterListField
: public BitField<bool, AsmFunctionField::kNext, 1> {};
+ class BlockScopeIsClassScopeField
+ : public BitField<bool, IsSimpleParameterListField::kNext, 1> {};
+ class FunctionKindField
+ : public BitField<FunctionKind, BlockScopeIsClassScopeField::kNext, 8> {};
// BitFields representing the encoded information for context locals in the
// ContextLocalInfoEntries part.
@@ -4768,6 +4880,7 @@ class FixedTypedArrayBase: public FixedArrayBase {
inline int size();
+ static inline int TypedArraySize(InstanceType type, int length);
inline int TypedArraySize(InstanceType type);
// Use with care: returns raw pointer into heap.
@@ -4776,6 +4889,8 @@ class FixedTypedArrayBase: public FixedArrayBase {
inline int DataSize();
private:
+ static inline int ElementSize(InstanceType type);
+
inline int DataSize(InstanceType type);
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedTypedArrayBase);
@@ -4969,6 +5084,71 @@ class DeoptimizationOutputData: public FixedArray {
};
+// HandlerTable is a fixed array containing entries for exception handlers in
+// the code object it is associated with. The table comes in two flavors:
+// 1) Based on ranges: Used for unoptimized code. Contains one entry per
+// exception handler and a range representing the try-block covered by that
+// handler. Layout looks as follows:
+// [ range-start , range-end , handler-offset , stack-depth ]
+// 2) Based on return addresses: Used for turbofanned code. Contains one entry
+// per call-site that could throw an exception. Layout looks as follows:
+// [ return-address-offset , handler-offset ]
+class HandlerTable : public FixedArray {
+ public:
+ // Accessors for handler table based on ranges.
+ void SetRangeStart(int index, int value) {
+ set(index * kRangeEntrySize + kRangeStartIndex, Smi::FromInt(value));
+ }
+ void SetRangeEnd(int index, int value) {
+ set(index * kRangeEntrySize + kRangeEndIndex, Smi::FromInt(value));
+ }
+ void SetRangeHandler(int index, int value) {
+ set(index * kRangeEntrySize + kRangeHandlerIndex, Smi::FromInt(value));
+ }
+ void SetRangeDepth(int index, int value) {
+ set(index * kRangeEntrySize + kRangeDepthIndex, Smi::FromInt(value));
+ }
+
+ // Accessors for handler table based on return addresses.
+ void SetReturnOffset(int index, int value) {
+ set(index * kReturnEntrySize + kReturnOffsetIndex, Smi::FromInt(value));
+ }
+ void SetReturnHandler(int index, int value) {
+ set(index * kReturnEntrySize + kReturnHandlerIndex, Smi::FromInt(value));
+ }
+
+ // Lookup handler in a table based on ranges.
+ int LookupRange(int pc_offset, int* stack_depth);
+
+ // Lookup handler in a table based on return addresses.
+ int LookupReturn(int pc_offset);
+
+ // Returns the required length of the underlying fixed array.
+ static int LengthForRange(int entries) { return entries * kRangeEntrySize; }
+ static int LengthForReturn(int entries) { return entries * kReturnEntrySize; }
+
+ DECLARE_CAST(HandlerTable)
+
+#if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
+ void HandlerTableRangePrint(std::ostream& os); // NOLINT
+ void HandlerTableReturnPrint(std::ostream& os); // NOLINT
+#endif
+
+ private:
+ // Layout description for handler table based on ranges.
+ static const int kRangeStartIndex = 0;
+ static const int kRangeEndIndex = 1;
+ static const int kRangeHandlerIndex = 2;
+ static const int kRangeDepthIndex = 3;
+ static const int kRangeEntrySize = 4;
+
+ // Layout description for handler table based on return addresses.
+ static const int kReturnOffsetIndex = 0;
+ static const int kReturnHandlerIndex = 1;
+ static const int kReturnEntrySize = 2;
+};
+
+
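
A hedged sketch of what a lookup over the range-based layout documented above does; the real HandlerTable::LookupRange body is not part of this hunk, so this is plain C++ over an invented flat int array with the same [start, end, handler, depth] entry shape:

    #include <climits>

    int LookupRangeSketch(const int* entries, int count, int pc_offset,
                          int* stack_depth_out) {
      int best_handler = -1;
      int best_span = INT_MAX;
      for (int i = 0; i < count; i++) {
        int start = entries[i * 4 + 0];
        int end = entries[i * 4 + 1];
        int handler = entries[i * 4 + 2];
        int depth = entries[i * 4 + 3];
        if (pc_offset < start || pc_offset >= end) continue;
        if (end - start < best_span) {  // innermost covering try-block wins
          best_span = end - start;
          best_handler = handler;
          *stack_depth_out = depth;
        }
      }
      return best_handler;  // -1 when no handler covers pc_offset
    }
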
// Forward declaration.
class Cell;
class PropertyCell;
@@ -5388,7 +5568,7 @@ class Code: public HeapObject {
return GetCodeAgeStub(isolate, kNotExecutedCodeAge, NO_MARKING_PARITY);
}
- void PrintDeoptLocation(FILE* out, int bailout_id);
+ void PrintDeoptLocation(FILE* out, Address pc);
bool CanDeoptAt(Address pc);
#ifdef VERIFY_HEAP
@@ -5869,25 +6049,14 @@ class Map: public HeapObject {
// map with DICTIONARY_ELEMENTS was found in the prototype chain.
bool DictionaryElementsInPrototypeChainOnly();
- inline bool HasTransitionArray() const;
- inline bool HasElementsTransition();
- inline Map* elements_transition_map();
+ inline Map* ElementsTransitionMap();
- inline Map* GetTransition(int transition_index);
- inline int SearchSpecialTransition(Symbol* name);
- inline int SearchTransition(PropertyKind kind, Name* name,
- PropertyAttributes attributes);
inline FixedArrayBase* GetInitialElements();
- DECL_ACCESSORS(transitions, TransitionArray)
-
- static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
- static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
-
- // Try to follow an existing transition to a field with attributes NONE. The
- // return value indicates whether the transition was successful.
- static inline Handle<Map> FindTransitionToField(Handle<Map> map,
- Handle<Name> key);
+ // [raw_transitions]: Provides access to the transitions storage field.
+ // Don't call set_raw_transitions() directly to overwrite transitions, use
+ // the TransitionArray::ReplaceTransitions() wrapper instead!
+ DECL_ACCESSORS(raw_transitions, Object)
Map* FindRootMap();
Map* FindFieldOwner(int descriptor);
@@ -5954,7 +6123,18 @@ class Map: public HeapObject {
bool CanUseOptimizationsBasedOnPrototypeRegistry();
// [constructor]: points back to the function responsible for this map.
- DECL_ACCESSORS(constructor, Object)
+ // The field overlaps with the back pointer. All maps in a transition tree
+ // have the same constructor, so maps with back pointers can walk the
+ // back pointer chain until they find the map holding their constructor.
+ DECL_ACCESSORS(constructor_or_backpointer, Object)
+ inline Object* GetConstructor() const;
+ inline void SetConstructor(Object* constructor,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ // [back pointer]: points back to the parent map from which a transition
+ // leads to this map. The field overlaps with the constructor (see above).
+ inline Object* GetBackPointer();
+ inline void SetBackPointer(Object* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
// [instance descriptors]: describes the object.
DECL_ACCESSORS(instance_descriptors, DescriptorArray)
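
A hedged sketch of the overlap contract just documented: since every map in a transition tree shares one constructor, GetConstructor() can chase back pointers until the field no longer holds a map. The actual inline bodies live outside this hunk (presumably objects-inl.h); this only illustrates the walk:

    Object* GetConstructorSketch(Map* map) {
      Object* maybe_constructor = map->constructor_or_backpointer();
      // While the slot holds a Map, it is acting as the back pointer.
      while (maybe_constructor->IsMap()) {
        maybe_constructor =
            Map::cast(maybe_constructor)->constructor_or_backpointer();
      }
      return maybe_constructor;  // the constructor (a non-map object)
    }
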
@@ -5981,47 +6161,11 @@ class Map: public HeapObject {
// [dependent code]: list of optimized codes that weakly embed this map.
DECL_ACCESSORS(dependent_code, DependentCode)
- // [back pointer]: points back to the parent map from which a transition
- // leads to this map. The field overlaps with prototype transitions and the
- // back pointer will be moved into the prototype transitions array if
- // required.
- inline Object* GetBackPointer();
- inline void SetBackPointer(Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- inline void init_back_pointer(Object* undefined);
-
- // [prototype transitions]: cache of prototype transitions.
- // Prototype transition is a transition that happens
- // when we change object's prototype to a new one.
- // Cache format:
- // 0: finger - index of the first free cell in the cache
- // 1 + i: target map
- inline FixedArray* GetPrototypeTransitions();
- inline bool HasPrototypeTransitions();
-
- static const int kProtoTransitionNumberOfEntriesOffset = 0;
- static const int kProtoTransitionHeaderSize = 1;
-
- inline int NumberOfProtoTransitions() {
- FixedArray* cache = GetPrototypeTransitions();
- if (cache->length() == 0) return 0;
- return
- Smi::cast(cache->get(kProtoTransitionNumberOfEntriesOffset))->value();
- }
-
- inline void SetNumberOfProtoTransitions(int value) {
- FixedArray* cache = GetPrototypeTransitions();
- DCHECK(cache->length() != 0);
- cache->set(kProtoTransitionNumberOfEntriesOffset, Smi::FromInt(value));
- }
+ // [weak cell cache]: cache that stores a weak cell pointing to this map.
+ DECL_ACCESSORS(weak_cell_cache, Object)
inline PropertyDetails GetLastDescriptorDetails();
- // The size of transition arrays are limited so they do not end up in large
- // object space. Otherwise ClearNonLiveTransitions would leak memory while
- // applying in-place right trimming.
- inline bool CanHaveMoreTransitions();
-
int LastAdded() {
int number_of_own_descriptors = NumberOfOwnDescriptors();
DCHECK(number_of_own_descriptors > 0);
@@ -6068,12 +6212,9 @@ class Map: public HeapObject {
// Returns a non-deprecated version of the input. If the input was not
// deprecated, it is directly returned. Otherwise, the non-deprecated version
// is found by re-transitioning from the root of the transition tree using the
- // descriptor array of the map. Returns NULL if no updated map is found.
- // This method also applies any pending migrations along the prototype chain.
+ // descriptor array of the map. Returns MaybeHandle<Map>() if no updated map
+ // is found.
static MaybeHandle<Map> TryUpdate(Handle<Map> map) WARN_UNUSED_RESULT;
- // Same as above, but does not touch the prototype chain.
- static MaybeHandle<Map> TryUpdateInternal(Handle<Map> map)
- WARN_UNUSED_RESULT;
// Returns a non-deprecated version of the input. This method may deprecate
// existing maps along the way if encodings conflict. Not for use while
@@ -6192,11 +6333,6 @@ class Map: public HeapObject {
// Removes a code object from the code cache at the given index.
void RemoveFromCodeCache(Name* name, Code* code, int index);
- // Set all map transitions from this map to dead maps to null. Also clear
- // back pointers in transition targets so that we do not process this map
- // again while following back pointers.
- void ClearNonLiveTransitions(Heap* heap);
-
// Computes a hash value for this map, to be used in HashTables and such.
int Hash();
@@ -6262,18 +6398,6 @@ class Map: public HeapObject {
inline int visitor_id();
inline void set_visitor_id(int visitor_id);
- typedef void (*TraverseCallback)(Map* map, void* data);
-
- void TraverseTransitionTree(TraverseCallback callback, void* data);
-
- // When you set the prototype of an object using the __proto__ accessor you
- // need a new map for the object (the prototype is stored in the map). In
- // order not to multiply maps unnecessarily we store these as transitions in
- // the original map. That way we can transition to the same map if the same
- // prototype is set, rather than creating a new map every time. The
- // transitions are in the form of a map where the keys are prototype objects
- // and the values are the maps they transition to.
- static const int kMaxCachedPrototypeTransitions = 256;
static Handle<Map> TransitionToPrototype(Handle<Map> map,
Handle<Object> prototype,
PrototypeOptimizationMode mode);
@@ -6285,15 +6409,15 @@ class Map: public HeapObject {
static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
static const int kBitField3Offset = kInstanceAttributesOffset + kIntSize;
static const int kPrototypeOffset = kBitField3Offset + kPointerSize;
- static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
- // Storage for the transition array is overloaded to directly contain a back
- // pointer if unused. When the map has transitions, the back pointer is
- // transferred to the transition array and accessed through an extra
- // indirection.
- static const int kTransitionsOrBackPointerOffset =
- kConstructorOffset + kPointerSize;
- static const int kDescriptorsOffset =
- kTransitionsOrBackPointerOffset + kPointerSize;
+ static const int kConstructorOrBackPointerOffset =
+ kPrototypeOffset + kPointerSize;
+ // When there is only one transition, it is stored directly in this field;
+ // otherwise a transition array is used.
+ // For prototype maps, this slot is used to store a pointer to the prototype
+ // object using this map.
+ static const int kTransitionsOffset =
+ kConstructorOrBackPointerOffset + kPointerSize;
+ static const int kDescriptorsOffset = kTransitionsOffset + kPointerSize;
#if V8_DOUBLE_FIELDS_UNBOXING
static const int kLayoutDecriptorOffset = kDescriptorsOffset + kPointerSize;
static const int kCodeCacheOffset = kLayoutDecriptorOffset + kPointerSize;
@@ -6302,7 +6426,8 @@ class Map: public HeapObject {
static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
#endif
static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
- static const int kSize = kDependentCodeOffset + kPointerSize;
+ static const int kWeakCellCacheOffset = kDependentCodeOffset + kPointerSize;
+ static const int kSize = kWeakCellCacheOffset + kPointerSize;
// Layout of pointer fields. Heap iteration code relies on them
// being continuously allocated.
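
The hunks above rearrange Map's field layout: the old constructor slot and the combined transitions-or-back-pointer slot become one constructor-or-back-pointer slot plus a dedicated transitions slot, and a weak-cell-cache slot is appended before kSize. A minimal standalone sketch of how the offset chain works out (pointer size and the starting offset below are illustrative assumptions for a 64-bit build, not V8's actual header values):

    #include <cstdio>

    int main() {
      // Only the chaining mirrors the patch: constructor and back pointer
      // share a slot, transitions get their own slot, and a weak cell
      // cache slot is appended at the end.
      const int kPointerSize = 8;       // assumed 64-bit target
      const int kPrototypeOffset = 24;  // assumed position after bit fields
      const int kConstructorOrBackPointerOffset =
          kPrototypeOffset + kPointerSize;
      const int kTransitionsOffset =
          kConstructorOrBackPointerOffset + kPointerSize;
      const int kDescriptorsOffset = kTransitionsOffset + kPointerSize;
      const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;  // unboxing off
      const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
      const int kWeakCellCacheOffset = kDependentCodeOffset + kPointerSize;
      const int kSize = kWeakCellCacheOffset + kPointerSize;
      std::printf("weak cell cache at +%d, map size %d\n",
                  kWeakCellCacheOffset, kSize);
      return 0;
    }
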
@@ -6432,15 +6557,6 @@ class Map: public HeapObject {
static Handle<Map> TransitionElementsToSlow(Handle<Map> object,
ElementsKind to_kind);
- // Zaps the contents of backing data structures. Note that the
- // heap verifier (i.e. VerifyMarkingVisitor) relies on zapping of objects
- // holding weak references when incremental marking is used, because it also
- // iterates over objects that are otherwise unreachable.
- // In general we only want to call these functions in release mode when
- // heap verification is turned on.
- void ZapPrototypeTransitions();
- void ZapTransitions();
-
void DeprecateTransitionTree();
bool DeprecateTarget(PropertyKind kind, Name* key,
PropertyAttributes attributes,
@@ -6449,9 +6565,12 @@ class Map: public HeapObject {
Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
+ // Update field type of the given descriptor to new representation and new
+ // type. The type must be prepared for storing in descriptor array:
+ // it must be either a simple type or a map wrapped in a weak cell.
void UpdateFieldType(int descriptor_number, Handle<Name> name,
Representation new_representation,
- Handle<HeapType> new_type);
+ Handle<Object> new_wrapped_type);
void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
PropertyAttributes attributes);
@@ -6466,16 +6585,6 @@ class Map: public HeapObject {
HeapType* old_field_type,
HeapType* new_field_type);
- static inline void SetPrototypeTransitions(
- Handle<Map> map,
- Handle<FixedArray> prototype_transitions);
-
- static Handle<Map> GetPrototypeTransition(Handle<Map> map,
- Handle<Object> prototype);
- static Handle<Map> PutPrototypeTransition(Handle<Map> map,
- Handle<Object> prototype,
- Handle<Map> target_map);
-
static const int kFastPropertiesSoftLimit = 12;
static const int kMaxFastProperties = 128;
@@ -7221,8 +7330,9 @@ class SharedFunctionInfo: public HeapObject {
kIsConciseMethod,
kIsAccessorFunction,
kIsDefaultConstructor,
- kIsBaseConstructor,
kIsSubclassConstructor,
+ kIsBaseConstructor,
+ kInClassLiteral,
kIsAsmFunction,
kDeserialized,
kCompilerHintsCount // Pseudo entry
@@ -7230,7 +7340,7 @@ class SharedFunctionInfo: public HeapObject {
// Add hints for other modes when they're added.
STATIC_ASSERT(LANGUAGE_END == 3);
- class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 7> {};
+ class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 8> {};
class DeoptCountBits : public BitField<int, 0, 4> {};
class OptReenableTriesBits : public BitField<int, 4, 18> {};
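
FunctionKindBits is widened from 7 to 8 bits because the kind values are bit flags, so each additional kind needs another bit in the compiler-hints word. A minimal sketch of the BitField encode/decode pattern (the shift and the flag name below are illustrative assumptions, not V8's actual values):

    #include <cstdint>
    #include <cstdio>

    // Stand-in for V8's BitField template: a size-bit field of T stored at
    // a fixed shift inside a 32-bit hints word.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
    };

    // A hypothetical eighth kind flag: its value needs the eighth bit, so a
    // 7-bit field would silently truncate it.
    enum FunctionKind : uint32_t { kNormalFunction = 0, kExampleKindFlag = 1u << 7 };
    using FunctionKindBits = BitField<FunctionKind, 5, 8>;  // assumed shift

    int main() {
      uint32_t hints = FunctionKindBits::encode(kExampleKindFlag);
      std::printf("round-trips: %d\n",
                  FunctionKindBits::decode(hints) == kExampleKindFlag);
      return 0;
    }
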
@@ -7321,11 +7431,6 @@ class JSGeneratorObject: public JSObject {
// [operand_stack]: Saved operand stack.
DECL_ACCESSORS(operand_stack, FixedArray)
- // [stack_handler_index]: Index of first stack handler in operand_stack, or -1
- // if the captured activation had no stack handler.
- inline int stack_handler_index() const;
- inline void set_stack_handler_index(int stack_handler_index);
-
DECLARE_CAST(JSGeneratorObject)
// Dispatched behavior.
@@ -7342,9 +7447,7 @@ class JSGeneratorObject: public JSObject {
static const int kReceiverOffset = kContextOffset + kPointerSize;
static const int kContinuationOffset = kReceiverOffset + kPointerSize;
static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
- static const int kStackHandlerIndexOffset =
- kOperandStackOffset + kPointerSize;
- static const int kSize = kStackHandlerIndexOffset + kPointerSize;
+ static const int kSize = kOperandStackOffset + kPointerSize;
// Resume mode, for use by runtime functions.
enum ResumeMode { NEXT, THROW };
@@ -7579,12 +7682,13 @@ class JSFunction: public JSObject {
// Returns the number of allocated literals.
inline int NumberOfLiterals();
- // Retrieve the native context from a function's literal array.
- static Context* NativeContextFromLiterals(FixedArray* literals);
-
// Used for flags such as --hydrogen-filter.
bool PassesFilter(const char* raw_filter);
+ // The function's name if it is configured, otherwise shared function info
+ // debug name.
+ static Handle<String> GetDebugName(Handle<JSFunction> function);
+
// Layout descriptors. The last property (from kNonWeakFieldsEndOffset to
// kSize) is weak and has special handling during garbage collection.
static const int kCodeEntryOffset = JSObject::kHeaderSize;
@@ -7598,10 +7702,6 @@ class JSFunction: public JSObject {
static const int kNextFunctionLinkOffset = kNonWeakFieldsEndOffset;
static const int kSize = kNextFunctionLinkOffset + kPointerSize;
- // Layout of the literals array.
- static const int kLiteralsPrefixSize = 1;
- static const int kLiteralNativeContextIndex = 0;
-
// Layout of the bound-function binding array.
static const int kBoundFunctionIndex = 0;
static const int kBoundThisIndex = 1;
@@ -7709,10 +7809,6 @@ class JSBuiltinsObject: public GlobalObject {
inline Object* javascript_builtin(Builtins::JavaScript id);
inline void set_javascript_builtin(Builtins::JavaScript id, Object* value);
- // Accessors for code of the runtime routines written in JavaScript.
- inline Code* javascript_builtin_code(Builtins::JavaScript id);
- inline void set_javascript_builtin_code(Builtins::JavaScript id, Code* value);
-
DECLARE_CAST(JSBuiltinsObject)
// Dispatched behavior.
@@ -7724,19 +7820,13 @@ class JSBuiltinsObject: public GlobalObject {
// (function and code object).
static const int kJSBuiltinsCount = Builtins::id_count;
static const int kJSBuiltinsOffset = GlobalObject::kHeaderSize;
- static const int kJSBuiltinsCodeOffset =
- GlobalObject::kHeaderSize + (kJSBuiltinsCount * kPointerSize);
static const int kSize =
- kJSBuiltinsCodeOffset + (kJSBuiltinsCount * kPointerSize);
+ GlobalObject::kHeaderSize + (kJSBuiltinsCount * kPointerSize);
static int OffsetOfFunctionWithId(Builtins::JavaScript id) {
return kJSBuiltinsOffset + id * kPointerSize;
}
- static int OffsetOfCodeWithId(Builtins::JavaScript id) {
- return kJSBuiltinsCodeOffset + id * kPointerSize;
- }
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSBuiltinsObject);
};
@@ -8121,7 +8211,6 @@ class CodeCache: public Struct {
public:
DECL_ACCESSORS(default_cache, FixedArray)
DECL_ACCESSORS(normal_type_cache, Object)
- DECL_ACCESSORS(weak_cell_cache, Object)
// Add the code object to the cache.
static void Update(
@@ -8149,8 +8238,7 @@ class CodeCache: public Struct {
static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
static const int kNormalTypeCacheOffset =
kDefaultCacheOffset + kPointerSize;
- static const int kWeakCellCacheOffset = kNormalTypeCacheOffset + kPointerSize;
- static const int kSize = kWeakCellCacheOffset + kPointerSize;
+ static const int kSize = kNormalTypeCacheOffset + kPointerSize;
private:
static void UpdateDefaultCache(
@@ -9684,14 +9772,14 @@ class Oddball: public HeapObject {
class Cell: public HeapObject {
public:
- // [value]: value of the global property.
+ // [value]: value of the cell.
DECL_ACCESSORS(value, Object)
DECLARE_CAST(Cell)
static inline Cell* FromValueAddress(Address value) {
Object* result = FromAddress(value - kValueOffset);
- DCHECK(result->IsCell() || result->IsPropertyCell());
+ DCHECK(result->IsCell());
return static_cast<Cell*>(result);
}
@@ -9716,46 +9804,38 @@ class Cell: public HeapObject {
};
-class PropertyCell: public Cell {
+class PropertyCell : public HeapObject {
public:
- // [type]: type of the global property.
- HeapType* type();
- void set_type(HeapType* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
+ // [value]: value of the global property.
+ DECL_ACCESSORS(value, Object)
// [dependent_code]: dependent code that depends on the type of the global
// property.
DECL_ACCESSORS(dependent_code, DependentCode)
- // Sets the value of the cell and updates the type field to be the union
- // of the cell's current type and the value's type. If the change causes
- // a change of the type of the cell's contents, code dependent on the cell
- // will be deoptimized.
- // Usually returns the value that was passed in, but may perform
- // non-observable modifications on it, such as internalize strings.
- static Handle<Object> SetValueInferType(Handle<PropertyCell> cell,
- Handle<Object> value);
-
// Computes the new type of the cell's contents for the given value, but
- // without actually modifying the 'type' field.
- static Handle<HeapType> UpdatedType(Handle<PropertyCell> cell,
- Handle<Object> value);
+ // without actually modifying the details.
+ static PropertyCellType UpdatedType(Handle<PropertyCell> cell,
+ Handle<Object> value,
+ PropertyDetails details);
+ static Handle<Object> UpdateCell(Handle<NameDictionary> dictionary, int entry,
+ Handle<Object> value,
+ PropertyDetails details);
+
+ static Handle<PropertyCell> InvalidateEntry(Handle<NameDictionary> dictionary,
+ int entry);
static void AddDependentCompilationInfo(Handle<PropertyCell> cell,
CompilationInfo* info);
DECLARE_CAST(PropertyCell)
- inline Address TypeAddress() {
- return address() + kTypeOffset;
- }
-
// Dispatched behavior.
DECLARE_PRINTER(PropertyCell)
DECLARE_VERIFIER(PropertyCell)
// Layout description.
- static const int kTypeOffset = kValueOffset + kPointerSize;
- static const int kDependentCodeOffset = kTypeOffset + kPointerSize;
+ static const int kValueOffset = HeapObject::kHeaderSize;
+ static const int kDependentCodeOffset = kValueOffset + kPointerSize;
static const int kSize = kDependentCodeOffset + kPointerSize;
static const int kPointerFieldsBeginOffset = kValueOffset;
@@ -9766,7 +9846,6 @@ class PropertyCell: public Cell {
kSize> BodyDescriptor;
private:
- DECL_ACCESSORS(type_raw, Object)
DISALLOW_IMPLICIT_CONSTRUCTORS(PropertyCell);
};
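
With this change a PropertyCell no longer carries a per-cell HeapType: it is a bare {value, dependent_code} pair, and what kind of global property it holds is tracked as a small PropertyCellType carried in the dictionary's PropertyDetails. A standalone sketch of that shape and of a plausible type-transition rule (the transition logic here is an assumption for illustration, not V8's exact lattice):

    #include <cstdio>

    enum class PropertyCellType { kUninitialized, kConstant, kMutable };

    // The cell itself is now only two tagged slots, as in the patch:
    //   kValueOffset         == HeapObject header
    //   kDependentCodeOffset == kValueOffset + kPointerSize
    struct PropertyCellSketch {
      const void* value;
      const void* dependent_code;
    };

    // Assumed rule: the first store makes the cell constant; a conflicting
    // store degrades it to mutable (real V8 would also deoptimize the
    // dependent code at that point).
    PropertyCellType UpdatedType(PropertyCellType old_type, bool same_value) {
      if (old_type == PropertyCellType::kUninitialized)
        return PropertyCellType::kConstant;
      if (old_type == PropertyCellType::kConstant && !same_value)
        return PropertyCellType::kMutable;
      return old_type;
    }

    int main() {
      PropertyCellType t = PropertyCellType::kUninitialized;
      t = UpdatedType(t, false);  // first store: kConstant
      t = UpdatedType(t, false);  // different value: kMutable
      std::printf("final cell type: %d\n", static_cast<int>(t));
      return 0;
    }
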
@@ -10577,6 +10656,7 @@ class InterceptorInfo: public Struct {
DECL_ACCESSORS(data, Object)
DECL_BOOLEAN_ACCESSORS(can_intercept_symbols)
DECL_BOOLEAN_ACCESSORS(all_can_read)
+ DECL_BOOLEAN_ACCESSORS(non_masking)
inline int flags() const;
inline void set_flags(int flags);
@@ -10598,6 +10678,7 @@ class InterceptorInfo: public Struct {
static const int kCanInterceptSymbolsBit = 0;
static const int kAllCanReadBit = 1;
+ static const int kNonMasking = 2;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
@@ -10671,6 +10752,7 @@ class FunctionTemplateInfo: public TemplateInfo {
DECL_BOOLEAN_ACCESSORS(remove_prototype)
DECL_BOOLEAN_ACCESSORS(do_not_cache)
DECL_BOOLEAN_ACCESSORS(instantiated)
+ DECL_BOOLEAN_ACCESSORS(accept_any_receiver)
DECLARE_CAST(FunctionTemplateInfo)
@@ -10716,6 +10798,7 @@ class FunctionTemplateInfo: public TemplateInfo {
static const int kRemovePrototypeBit = 4;
static const int kDoNotCacheBit = 5;
static const int kInstantiatedBit = 6;
+ static const int kAcceptAnyReceiver = 7;
DISALLOW_IMPLICIT_CONSTRUCTORS(FunctionTemplateInfo);
};
@@ -10781,10 +10864,10 @@ class DebugInfo: public Struct {
int source_position, int statement_position,
Handle<Object> break_point_object);
// Get the break point objects for a code position.
- Object* GetBreakPointObjects(int code_position);
+ Handle<Object> GetBreakPointObjects(int code_position);
// Find the break point info holding this break point object.
- static Object* FindBreakPointInfo(Handle<DebugInfo> debug_info,
- Handle<Object> break_point_object);
+ static Handle<Object> FindBreakPointInfo(Handle<DebugInfo> debug_info,
+ Handle<Object> break_point_object);
// Get the number of break points for this function.
int GetBreakPointCount();
@@ -10947,9 +11030,12 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits an external reference embedded into a code object.
virtual void VisitExternalReference(RelocInfo* rinfo);
- // Visits an external reference. The value may be modified on return.
+ // Visits an external reference.
virtual void VisitExternalReference(Address* p) {}
+ // Visits an (encoded) internal reference.
+ virtual void VisitInternalReference(RelocInfo* rinfo) {}
+
// Visits a handle that has an embedder-assigned class ID.
virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index 5999df9d6d..eda4f5ca9d 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -42,7 +42,11 @@ void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
class OptimizingCompilerThread::CompileTask : public v8::Task {
public:
- explicit CompileTask(Isolate* isolate) : isolate_(isolate) {}
+ explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
+ OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
+ base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_);
+ ++thread->ref_count_;
+ }
virtual ~CompileTask() {}
@@ -54,7 +58,6 @@ class OptimizingCompilerThread::CompileTask : public v8::Task {
DisallowHandleDereference no_deref;
OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
-
{
TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
@@ -62,32 +65,14 @@ class OptimizingCompilerThread::CompileTask : public v8::Task {
base::OS::Sleep(thread->recompilation_delay_);
}
- StopFlag flag;
- OptimizedCompileJob* job = thread->NextInput(&flag);
-
- if (flag == CONTINUE) {
- thread->CompileNext(job);
- } else {
- AllowHandleDereference allow_handle_dereference;
- if (!job->info()->is_osr()) {
- DisposeOptimizedCompileJob(job, true);
- }
- }
+ thread->CompileNext(thread->NextInput(true));
}
-
- bool signal = false;
{
- base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_);
- if (--thread->task_count_ == 0) {
- if (static_cast<StopFlag>(base::Acquire_Load(&thread->stop_thread_)) ==
- FLUSH) {
- base::Release_Store(&thread->stop_thread_,
- static_cast<base::AtomicWord>(CONTINUE));
- signal = true;
- }
+ base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_);
+ if (--thread->ref_count_ == 0) {
+ thread->ref_count_zero_.NotifyOne();
}
}
- if (signal) thread->stop_semaphore_.Signal();
}
Isolate* isolate_;
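
The new scheme replaces the task_count_/recursive-mutex bookkeeping with a plain reference count: each CompileTask increments ref_count_ in its constructor and decrements it at the end of Run(), notifying a condition variable when the count reaches zero so Flush()/Stop() can wait for quiescence. A self-contained sketch of the same pattern using std:: primitives in place of the v8::base ones:

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    struct RefCountedThreadPool {
      int ref_count_ = 0;
      std::mutex ref_count_mutex_;
      std::condition_variable ref_count_zero_;

      void TaskCreated() {  // mirrors the CompileTask constructor
        std::lock_guard<std::mutex> guard(ref_count_mutex_);
        ++ref_count_;
      }
      void TaskFinished() {  // mirrors the end of CompileTask::Run
        std::lock_guard<std::mutex> guard(ref_count_mutex_);
        if (--ref_count_ == 0) ref_count_zero_.notify_one();
      }
      void WaitUntilIdle() {  // mirrors the wait loop in Flush/Stop
        std::unique_lock<std::mutex> lock(ref_count_mutex_);
        ref_count_zero_.wait(lock, [this] { return ref_count_ == 0; });
      }
    };

    int main() {
      RefCountedThreadPool pool;
      std::vector<std::thread> tasks;
      for (int i = 0; i < 4; ++i) {
        pool.TaskCreated();  // counted before the task is handed off
        tasks.emplace_back([&pool] { pool.TaskFinished(); });
      }
      pool.WaitUntilIdle();  // returns only after every task has run
      for (auto& t : tasks) t.join();
      std::printf("all compile tasks drained\n");
      return 0;
    }
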
@@ -97,6 +82,12 @@ class OptimizingCompilerThread::CompileTask : public v8::Task {
OptimizingCompilerThread::~OptimizingCompilerThread() {
+#ifdef DEBUG
+ {
+ base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ DCHECK_EQ(0, ref_count_);
+ }
+#endif
DCHECK_EQ(0, input_queue_length_);
DeleteArray(input_queue_);
if (FLAG_concurrent_osr) {
@@ -168,28 +159,29 @@ void OptimizingCompilerThread::Run() {
}
-OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) {
+OptimizedCompileJob* OptimizingCompilerThread::NextInput(
+ bool check_if_flushing) {
base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
- if (input_queue_length_ == 0) {
- if (flag) {
- UNREACHABLE();
- *flag = CONTINUE;
- }
- return NULL;
- }
+ if (input_queue_length_ == 0) return NULL;
OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
DCHECK_NOT_NULL(job);
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
- if (flag) {
- *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_));
+ if (check_if_flushing) {
+ if (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)) != CONTINUE) {
+ if (!job->info()->is_osr()) {
+ AllowHandleDereference allow_handle_dereference;
+ DisposeOptimizedCompileJob(job, true);
+ }
+ return NULL;
+ }
}
return job;
}
void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
- DCHECK_NOT_NULL(job);
+ if (!job) return;
// The function may have already been optimized by OSR. Simply continue.
OptimizedCompileJob::Status status = job->OptimizeGraph();
@@ -222,7 +214,6 @@ void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
- base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
OptimizedCompileJob* job;
while (output_queue_.Dequeue(&job)) {
// OSR jobs are dealt with separately.
@@ -245,20 +236,16 @@ void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
void OptimizingCompilerThread::Flush() {
DCHECK(!IsOptimizerThread());
- bool block = true;
- if (job_based_recompilation_) {
- base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
- block = task_count_ > 0 || blocked_jobs_ > 0;
- if (block) {
- base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
- }
- if (FLAG_block_concurrent_recompilation) Unblock();
+ base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+ if (FLAG_block_concurrent_recompilation) Unblock();
+ if (!job_based_recompilation_) {
+ input_queue_semaphore_.Signal();
+ stop_semaphore_.Wait();
} else {
- base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
- if (FLAG_block_concurrent_recompilation) Unblock();
+ base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
+ base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE));
}
- if (!job_based_recompilation_) input_queue_semaphore_.Signal();
- if (block) stop_semaphore_.Wait();
FlushOutputQueue(true);
if (FLAG_concurrent_osr) FlushOsrBuffer(true);
if (tracing_enabled_) {
@@ -269,20 +256,16 @@ void OptimizingCompilerThread::Flush() {
void OptimizingCompilerThread::Stop() {
DCHECK(!IsOptimizerThread());
- bool block = true;
- if (job_based_recompilation_) {
- base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
- block = task_count_ > 0 || blocked_jobs_ > 0;
- if (block) {
- base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
- }
- if (FLAG_block_concurrent_recompilation) Unblock();
+ base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
+ if (FLAG_block_concurrent_recompilation) Unblock();
+ if (!job_based_recompilation_) {
+ input_queue_semaphore_.Signal();
+ stop_semaphore_.Wait();
} else {
- base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
- if (FLAG_block_concurrent_recompilation) Unblock();
+ base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
+ while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
+ base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE));
}
- if (!job_based_recompilation_) input_queue_semaphore_.Signal();
- if (block) stop_semaphore_.Wait();
if (recompilation_delay_ != 0) {
// At this point the optimizing compiler thread's event loop has stopped.
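
Flush() and Stop() now just publish the stop flag and wait for the reference count to drain; the per-job cleanup has moved into NextInput(), which disposes non-OSR jobs itself once a flush is pending (OSR jobs are handled separately by FlushOsrBuffer). A simplified single-threaded sketch of that contract, with the input-queue mutex omitted for brevity:

    #include <atomic>
    #include <cstdio>
    #include <queue>

    struct Job { bool is_osr; };

    std::atomic<bool> stop_requested{false};  // stands in for stop_thread_
    std::queue<Job*> input_queue;             // queue mutex omitted here

    Job* NextInput(bool check_if_flushing) {
      if (input_queue.empty()) return nullptr;
      Job* job = input_queue.front();
      input_queue.pop();
      if (check_if_flushing &&
          stop_requested.load(std::memory_order_acquire)) {
        if (!job->is_osr) delete job;  // flushed jobs are disposed, not compiled
        return nullptr;
      }
      return job;
    }

    void CompileNext(Job* job) {
      if (!job) return;  // NULL now simply means "nothing to do"
      std::printf("compiling job\n");
      delete job;
    }

    int main() {
      input_queue.push(new Job{false});
      stop_requested.store(true, std::memory_order_release);
      CompileNext(NextInput(true));  // the queued job is dropped silently
      return 0;
    }
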
@@ -372,8 +355,6 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
if (FLAG_block_concurrent_recompilation) {
blocked_jobs_++;
} else if (job_based_recompilation_) {
- base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
- ++task_count_;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new CompileTask(isolate_), v8::Platform::kShortRunningTask);
} else {
@@ -384,10 +365,6 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
void OptimizingCompilerThread::Unblock() {
DCHECK(!IsOptimizerThread());
- {
- base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
- task_count_ += blocked_jobs_;
- }
while (blocked_jobs_ > 0) {
if (job_based_recompilation_) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
index 3088843309..7d60d9bf74 100644
--- a/deps/v8/src/optimizing-compiler-thread.h
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -6,6 +6,7 @@
#define V8_OPTIMIZING_COMPILER_THREAD_H_
#include "src/base/atomicops.h"
+#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
@@ -35,10 +36,10 @@ class OptimizingCompilerThread : public base::Thread {
input_queue_shift_(0),
osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
osr_buffer_cursor_(0),
- task_count_(0),
osr_hits_(0),
osr_attempts_(0),
blocked_jobs_(0),
+ ref_count_(0),
tracing_enabled_(FLAG_trace_concurrent_recompilation),
job_based_recompilation_(FLAG_job_based_recompilation),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
@@ -96,7 +97,7 @@ class OptimizingCompilerThread : public base::Thread {
void FlushOutputQueue(bool restore_function_code);
void FlushOsrBuffer(bool restore_function_code);
void CompileNext(OptimizedCompileJob* job);
- OptimizedCompileJob* NextInput(StopFlag* flag = NULL);
+ OptimizedCompileJob* NextInput(bool check_if_flushing = false);
// Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
// Tasks evicted from the cyclic buffer are discarded.
@@ -140,18 +141,15 @@ class OptimizingCompilerThread : public base::Thread {
base::TimeDelta time_spent_compiling_;
base::TimeDelta time_spent_total_;
- int task_count_;
- // TODO(jochen): This is currently a RecursiveMutex since both Flush/Stop and
- // Unblock try to get it, but the former methods both can call Unblock. Once
- // job based recompilation is on by default, and the dedicated thread can be
- // removed, this should be refactored to not use a RecursiveMutex.
- base::RecursiveMutex task_count_mutex_;
-
int osr_hits_;
int osr_attempts_;
int blocked_jobs_;
+ int ref_count_;
+ base::Mutex ref_count_mutex_;
+ base::ConditionVariable ref_count_zero_;
+
// Copies of FLAG_trace_concurrent_recompilation,
// FLAG_concurrent_recompilation_delay and
// FLAG_job_based_recompilation that will be used from the background thread.
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 985a90f8dc..8ed20ee212 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -23,6 +23,75 @@
namespace v8 {
namespace internal {
+ScriptData::ScriptData(const byte* data, int length)
+ : owns_data_(false), rejected_(false), data_(data), length_(length) {
+ if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
+ byte* copy = NewArray<byte>(length);
+ DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
+ CopyBytes(copy, data, length);
+ data_ = copy;
+ AcquireDataOwnership();
+ }
+}
+
+
+ParseInfo::ParseInfo(Zone* zone)
+ : zone_(zone),
+ flags_(0),
+ source_stream_(nullptr),
+ source_stream_encoding_(ScriptCompiler::StreamedSource::ONE_BYTE),
+ extension_(nullptr),
+ compile_options_(ScriptCompiler::kNoCompileOptions),
+ script_scope_(nullptr),
+ unicode_cache_(nullptr),
+ stack_limit_(0),
+ hash_seed_(0),
+ cached_data_(nullptr),
+ ast_value_factory_(nullptr),
+ literal_(nullptr),
+ scope_(nullptr) {}
+
+
+ParseInfo::ParseInfo(Zone* zone, Handle<JSFunction> function)
+ : ParseInfo(zone, Handle<SharedFunctionInfo>(function->shared())) {
+ set_closure(function);
+ set_context(Handle<Context>(function->context()));
+}
+
+
+ParseInfo::ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared)
+ : ParseInfo(zone) {
+ isolate_ = shared->GetIsolate();
+
+ set_lazy();
+ set_hash_seed(isolate_->heap()->HashSeed());
+ set_stack_limit(isolate_->stack_guard()->real_climit());
+ set_unicode_cache(isolate_->unicode_cache());
+ set_language_mode(shared->language_mode());
+ set_shared_info(shared);
+
+ Handle<Script> script(Script::cast(shared->script()));
+ set_script(script);
+ if (!script.is_null() && script->type()->value() == Script::TYPE_NATIVE) {
+ set_native();
+ }
+}
+
+
+ParseInfo::ParseInfo(Zone* zone, Handle<Script> script) : ParseInfo(zone) {
+ isolate_ = script->GetIsolate();
+
+ set_hash_seed(isolate_->heap()->HashSeed());
+ set_stack_limit(isolate_->stack_guard()->real_climit());
+ set_unicode_cache(isolate_->unicode_cache());
+ set_script(script);
+
+ if (script->type()->value() == Script::TYPE_NATIVE) {
+ set_native();
+ }
+}
+
+
RegExpBuilder::RegExpBuilder(Zone* zone)
: zone_(zone),
pending_empty_(false),
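
ScriptData's new constructor defends against cached data handed in at an unaligned address: if the buffer is not pointer-aligned, it takes an aligned private copy and assumes ownership of it. A standalone equivalent using plain C++ in place of NewArray/CopyBytes/IsAligned:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const uintptr_t kPointerAlignment = sizeof(void*);

    struct ScriptDataSketch {
      const uint8_t* data_;
      int length_;
      bool owns_data_;

      ScriptDataSketch(const uint8_t* data, int length)
          : data_(data), length_(length), owns_data_(false) {
        if (reinterpret_cast<uintptr_t>(data) % kPointerAlignment != 0) {
          uint8_t* copy = new uint8_t[length];  // new[] is pointer-aligned
          std::memcpy(copy, data, length);
          data_ = copy;
          owns_data_ = true;  // plays the role of AcquireDataOwnership()
        }
      }
      ~ScriptDataSketch() {
        if (owns_data_) delete[] data_;
      }
    };

    int main() {
      alignas(sizeof(void*)) uint8_t raw[16] = {0};
      ScriptDataSketch aligned(raw, 16);        // no copy needed
      ScriptDataSketch unaligned(raw + 1, 15);  // misaligned: copy path
      std::printf("aligned copied: %d, unaligned copied: %d\n",
                  aligned.owns_data_, unaligned.owns_data_);
      return 0;
    }
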
@@ -251,7 +320,7 @@ int ParseData::FunctionsSize() {
}
-void Parser::SetCachedData(CompilationInfo* info) {
+void Parser::SetCachedData(ParseInfo* info) {
if (compile_options_ == ScriptCompiler::kNoCompileOptions) {
cached_parse_data_ = NULL;
} else {
@@ -600,56 +669,46 @@ Expression* ParserTraits::NewThrowError(
void ParserTraits::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- const char* arg,
- bool is_reference_error) {
+ const char* message, const char* arg,
+ ParseErrorType error_type) {
if (parser_->stack_overflow()) {
// Suppress the error message (syntax error or such) in the presence of a
// stack overflow. The isolate allows only one pending exception at a time

// and we want to report the stack overflow later.
return;
}
- parser_->has_pending_error_ = true;
- parser_->pending_error_location_ = source_location;
- parser_->pending_error_message_ = message;
- parser_->pending_error_char_arg_ = arg;
- parser_->pending_error_arg_ = NULL;
- parser_->pending_error_is_reference_error_ = is_reference_error;
+ parser_->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos,
+ message, arg, error_type);
}
-void ParserTraits::ReportMessage(const char* message,
- const char* arg,
- bool is_reference_error) {
+void ParserTraits::ReportMessage(const char* message, const char* arg,
+ ParseErrorType error_type) {
Scanner::Location source_location = parser_->scanner()->location();
- ReportMessageAt(source_location, message, arg, is_reference_error);
+ ReportMessageAt(source_location, message, arg, error_type);
}
-void ParserTraits::ReportMessage(const char* message,
- const AstRawString* arg,
- bool is_reference_error) {
+void ParserTraits::ReportMessage(const char* message, const AstRawString* arg,
+ ParseErrorType error_type) {
Scanner::Location source_location = parser_->scanner()->location();
- ReportMessageAt(source_location, message, arg, is_reference_error);
+ ReportMessageAt(source_location, message, arg, error_type);
}
void ParserTraits::ReportMessageAt(Scanner::Location source_location,
- const char* message,
- const AstRawString* arg,
- bool is_reference_error) {
+ const char* message, const AstRawString* arg,
+ ParseErrorType error_type) {
if (parser_->stack_overflow()) {
// Suppress the error message (syntax error or such) in the presence of a
// stack overflow. The isolate allows only one pending exception at a time
// and we want to report the stack overflow later.
return;
}
- parser_->has_pending_error_ = true;
- parser_->pending_error_location_ = source_location;
- parser_->pending_error_message_ = message;
- parser_->pending_error_char_arg_ = NULL;
- parser_->pending_error_arg_ = arg;
- parser_->pending_error_is_reference_error_ = is_reference_error;
+ parser_->pending_error_handler_.ReportMessageAt(source_location.beg_pos,
+ source_location.end_pos,
+ message, arg, error_type);
}
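
Instead of stashing pending-error fields directly on the Parser, error reports are now funnelled into a single pending-error handler object and materialized later on the main thread, which keeps background parsing free of Isolate access. A minimal sketch of that pattern (the class shape and first-error-wins policy are illustrative assumptions, not the exact V8 class):

    #include <cstdio>
    #include <string>

    struct PendingErrorHandlerSketch {
      bool has_error = false;
      int beg_pos = -1, end_pos = -1;
      std::string message, arg;

      void ReportMessageAt(int beg, int end, const char* msg, const char* a) {
        if (has_error) return;  // assumed policy: first error wins
        has_error = true;
        beg_pos = beg;
        end_pos = end;
        message = msg;
        arg = a ? a : "";
      }
      // Called later, on the main thread, once an Isolate is safe to touch.
      void ThrowPendingError() const {
        if (!has_error) return;
        std::printf("SyntaxError: %s (%s) at %d-%d\n", message.c_str(),
                    arg.c_str(), beg_pos, end_pos);
      }
    };

    int main() {
      PendingErrorHandlerSketch handler;
      handler.ReportMessageAt(10, 14, "unexpected_reserved", "yield");
      handler.ThrowPendingError();
      return 0;
    }
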
@@ -704,6 +763,10 @@ Literal* ParserTraits::ExpressionFromLiteral(Token::Value token, int pos,
return factory->NewBooleanLiteral(true, pos);
case Token::FALSE_LITERAL:
return factory->NewBooleanLiteral(false, pos);
+ case Token::SMI: {
+ int value = scanner->smi_value();
+ return factory->NewSmiLiteral(value, pos);
+ }
case Token::NUMBER: {
double value = scanner->DoubleValue();
return factory->NewNumberLiteral(value, pos);
@@ -716,7 +779,9 @@ Literal* ParserTraits::ExpressionFromLiteral(Token::Value token, int pos,
Expression* ParserTraits::ExpressionFromIdentifier(const AstRawString* name,
- int pos, Scope* scope,
+ int start_position,
+ int end_position,
+ Scope* scope,
AstNodeFactory* factory) {
if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
@@ -725,8 +790,10 @@ Expression* ParserTraits::ExpressionFromIdentifier(const AstRawString* name,
// for Traits::DeclareArrowParametersFromExpression() to be able to
// pick the names of the parameters.
return parser_->parsing_lazy_arrow_parameters_
- ? factory->NewVariableProxy(name, false, pos)
- : scope->NewUnresolved(factory, name, pos);
+ ? factory->NewVariableProxy(name, Variable::NORMAL, start_position,
+ end_position)
+ : scope->NewUnresolved(factory, name, start_position,
+ end_position);
}
@@ -781,38 +848,31 @@ ClassLiteral* ParserTraits::ParseClassLiteral(
}
-Parser::Parser(CompilationInfo* info, uintptr_t stack_limit, uint32_t hash_seed,
- UnicodeCache* unicode_cache)
- : ParserBase<ParserTraits>(info->zone(), &scanner_, stack_limit,
+Parser::Parser(ParseInfo* info)
+ : ParserBase<ParserTraits>(info->zone(), &scanner_, info->stack_limit(),
info->extension(), info->ast_value_factory(),
NULL, this),
- scanner_(unicode_cache),
+ scanner_(info->unicode_cache()),
reusable_preparser_(NULL),
original_scope_(NULL),
target_stack_(NULL),
compile_options_(info->compile_options()),
cached_parse_data_(NULL),
parsing_lazy_arrow_parameters_(false),
- has_pending_error_(false),
- pending_error_message_(NULL),
- pending_error_arg_(NULL),
- pending_error_char_arg_(NULL),
total_preparse_skipped_(0),
pre_parse_timer_(NULL),
parsing_on_main_thread_(true) {
- // Even though we were passed CompilationInfo, we should not store it in
+ // Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
- // CompilationInfo during background parsing.
+ // ParseInfo during background parsing.
DCHECK(!info->script().is_null() || info->source_stream() != NULL);
- set_allow_lazy(false); // Must be explicitly enabled.
+ set_allow_lazy(info->allow_lazy_parsing());
set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
- set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
set_allow_harmony_modules(!info->is_native() && FLAG_harmony_modules);
set_allow_harmony_arrow_functions(FLAG_harmony_arrow_functions);
set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
set_allow_harmony_classes(FLAG_harmony_classes);
set_allow_harmony_object_literals(FLAG_harmony_object_literals);
- set_allow_harmony_templates(FLAG_harmony_templates);
set_allow_harmony_sloppy(FLAG_harmony_sloppy);
set_allow_harmony_unicode(FLAG_harmony_unicode);
set_allow_harmony_computed_property_names(
@@ -825,13 +885,14 @@ Parser::Parser(CompilationInfo* info, uintptr_t stack_limit, uint32_t hash_seed,
}
if (info->ast_value_factory() == NULL) {
// info takes ownership of AstValueFactory.
- info->SetAstValueFactory(new AstValueFactory(zone(), hash_seed));
+ info->set_ast_value_factory(new AstValueFactory(zone(), info->hash_seed()));
+ info->set_ast_value_factory_owned();
ast_value_factory_ = info->ast_value_factory();
}
}
-FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
+FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
// TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
// see comment for HistogramTimerScope class.
@@ -839,7 +900,6 @@ FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
// called in the main thread.
DCHECK(parsing_on_main_thread_);
- Isolate* isolate = info->isolate();
HistogramTimerScope timer_scope(isolate->counters()->parse(), true);
Handle<String> source(String::cast(info->script()->source()));
isolate->counters()->total_parse_size()->Increment(source->length());
@@ -880,7 +940,7 @@ FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
if (eval_scope != NULL) {
eval_scope->set_end_position(source->length());
}
- HandleSourceURLComments(info);
+ HandleSourceURLComments(isolate, info->script());
if (FLAG_trace_parse && result != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
@@ -903,18 +963,18 @@ FunctionLiteral* Parser::ParseProgram(CompilationInfo* info) {
}
-FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
+FunctionLiteral* Parser::DoParseProgram(ParseInfo* info, Scope** scope,
Scope** eval_scope) {
// Note that this function can be called from the main thread or from a
// background thread. We should not access anything Isolate / heap dependent
- // via CompilationInfo, and also not pass it forward.
+ // via ParseInfo, and also not pass it forward.
DCHECK(scope_ == NULL);
DCHECK(target_stack_ == NULL);
FunctionLiteral* result = NULL;
{
*scope = NewScope(scope_, SCRIPT_SCOPE);
- info->SetScriptScope(*scope);
+ info->set_script_scope(*scope);
if (!info->context().is_null() && !info->context()->IsNativeContext()) {
*scope = Scope::DeserializeScopeChain(info->isolate(), zone(),
*info->context(), *scope);
@@ -930,8 +990,8 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
if (!(*scope)->is_script_scope() || is_strict(info->language_mode())) {
*scope = NewScope(*scope, EVAL_SCOPE);
}
- } else if (info->is_global()) {
- *scope = NewScope(*scope, SCRIPT_SCOPE);
+ } else if (info->is_module()) {
+ *scope = NewScope(*scope, MODULE_SCOPE);
}
(*scope)->set_start_position(0);
// End position will be set by the caller.
@@ -955,19 +1015,13 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
int beg_pos = scanner()->location().beg_pos;
if (info->is_module()) {
DCHECK(allow_harmony_modules());
- Statement* stmt = ParseModule(&ok);
- if (ok) {
- body->Add(stmt, zone());
- }
+ ParseModuleItemList(body, &ok);
} else {
ParseStatementList(body, Token::EOS, info->is_eval(), eval_scope, &ok);
}
if (ok && is_strict(language_mode())) {
CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
- }
-
- if (ok && allow_harmony_scoping() && is_strict(language_mode())) {
CheckConflictingVarDeclarations(scope_, &ok);
}
@@ -1000,13 +1054,13 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
}
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
+FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info) {
// It's OK to use the Isolate & counters here, since this function is only
// called in the main thread.
DCHECK(parsing_on_main_thread_);
- HistogramTimerScope timer_scope(info->isolate()->counters()->parse_lazy());
+ HistogramTimerScope timer_scope(isolate->counters()->parse_lazy());
Handle<String> source(String::cast(info->script()->source()));
- info->isolate()->counters()->total_parse_size()->Increment(source->length());
+ isolate->counters()->total_parse_size()->Increment(source->length());
base::ElapsedTimer timer;
if (FLAG_trace_parse) {
timer.Start();
@@ -1021,12 +1075,12 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
Handle<ExternalTwoByteString>::cast(source),
shared_info->start_position(),
shared_info->end_position());
- result = ParseLazy(info, &stream);
+ result = ParseLazy(isolate, info, &stream);
} else {
GenericStringUtf16CharacterStream stream(source,
shared_info->start_position(),
shared_info->end_position());
- result = ParseLazy(info, &stream);
+ result = ParseLazy(isolate, info, &stream);
}
if (FLAG_trace_parse && result != NULL) {
@@ -1038,7 +1092,7 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info) {
}
-FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
+FunctionLiteral* Parser::ParseLazy(Isolate* isolate, ParseInfo* info,
Utf16CharacterStream* source) {
Handle<SharedFunctionInfo> shared_info = info->shared_info();
scanner_.Initialize(source);
@@ -1059,12 +1113,12 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
{
// Parse the function literal.
Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
- info->SetScriptScope(scope);
+ info->set_script_scope(scope);
if (!info->closure().is_null()) {
// Ok to use Isolate here, since lazy function parsing is only done in the
// main thread.
DCHECK(parsing_on_main_thread_);
- scope = Scope::DeserializeScopeChain(info->isolate(), zone(),
+ scope = Scope::DeserializeScopeChain(isolate, zone(),
info->closure()->context(), scope);
}
original_scope_ = scope;
@@ -1088,8 +1142,24 @@ FunctionLiteral* Parser::ParseLazy(CompilationInfo* info,
// from creating unresolved variables in already-resolved scopes.
parsing_lazy_arrow_parameters_ = true;
Expression* expression = ParseExpression(false, &ok);
- DCHECK(expression->IsFunctionLiteral());
- result = expression->AsFunctionLiteral();
+ if (ok) {
+ // Scanning must end at the same position that was recorded
+ // previously. If not, parsing has been interrupted due to a
+ // stack overflow, at which point the partially parsed arrow
+ // function concise body happens to be a valid expression. This
+ // is a problem only for arrow functions with single statement
+ // bodies, since there is no end token such as "}" for normal
+ // functions.
+ if (scanner()->location().end_pos == shared_info->end_position()) {
+ // The pre-parser saw an arrow function here, so the full parser
+ // must produce a FunctionLiteral.
+ DCHECK(expression->IsFunctionLiteral());
+ result = expression->AsFunctionLiteral();
+ } else {
+ result = NULL;
+ ok = false;
+ }
+ }
} else if (shared_info->is_default_constructor()) {
result = DefaultConstructor(IsSubclassConstructor(shared_info->kind()),
scope, shared_info->start_position(),
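
The comment in the hunk above captures the subtlety: a concise-bodied arrow function has no closing token, so the only way to tell a complete lazy re-parse from one truncated by stack overflow is whether scanning stopped exactly at the end position the preparser recorded. Reduced to its essence:

    #include <cstdio>

    // The preparser recorded [start_position, end_position) for the arrow
    // function; a re-parse that stops short was cut off by stack overflow,
    // even though the fragment parsed so far is a valid expression.
    bool LazyArrowParseComplete(int scanner_end_pos, int recorded_end_pos) {
      return scanner_end_pos == recorded_end_pos;
    }

    int main() {
      // e.g. "x => x + 1" recorded as ending at position 10
      std::printf("full parse ok: %d\n", LazyArrowParseComplete(10, 10));
      std::printf("truncated parse ok: %d\n", LazyArrowParseComplete(7, 10));
      return 0;
    }
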
@@ -1135,8 +1205,27 @@ void* Parser::ParseStatementList(ZoneList<Statement*>* body, int end_token,
directive_prologue = false;
}
+ Token::Value token = peek();
Scanner::Location token_loc = scanner()->peek_location();
+ Scanner::Location old_super_loc = function_state_->super_call_location();
Statement* stat = ParseStatementListItem(CHECK_OK);
+ Scanner::Location super_loc = function_state_->super_call_location();
+
+ if (is_strong(language_mode()) &&
+ i::IsConstructor(function_state_->kind()) &&
+ !old_super_loc.IsValid() && super_loc.IsValid() &&
+ token != Token::SUPER) {
+ // TODO(rossberg): This is more permissive than spec'ed, it allows e.g.
+ // super(), 1;
+ // super() + "";
+ // super() = 0;
+ // That should still be safe, though, thanks to left-to-right evaluation.
+ // The proper check would be difficult to implement in the preparser.
+ ReportMessageAt(super_loc, "strong_super_call_nested");
+ *ok = false;
+ return NULL;
+ }
+
if (stat == NULL || stat->IsEmpty()) {
directive_prologue = false; // End of directive prologue.
continue;
@@ -1229,7 +1318,6 @@ Statement* Parser::ParseStatementListItem(bool* ok) {
case Token::VAR:
return ParseVariableStatement(kStatementListItem, NULL, ok);
case Token::LET:
- DCHECK(allow_harmony_scoping());
if (is_strict(language_mode())) {
return ParseVariableStatement(kStatementListItem, NULL, ok);
}
@@ -1258,7 +1346,7 @@ Statement* Parser::ParseModuleItem(bool* ok) {
}
-Statement* Parser::ParseModule(bool* ok) {
+void* Parser::ParseModuleItemList(ZoneList<Statement*>* body, bool* ok) {
// (Ecma 262 6th Edition, 15.2):
// Module :
// ModuleBody?
@@ -1266,53 +1354,48 @@ Statement* Parser::ParseModule(bool* ok) {
// ModuleBody :
// ModuleItem*
- Block* body = factory()->NewBlock(NULL, 16, false, RelocInfo::kNoPosition);
- Scope* scope = NewScope(scope_, MODULE_SCOPE);
- scope->set_start_position(scanner()->location().beg_pos);
- scope->SetLanguageMode(
- static_cast<LanguageMode>(scope->language_mode() | STRICT_BIT));
-
- {
- BlockState block_state(&scope_, scope);
+ DCHECK(scope_->is_module_scope());
+ scope_->SetLanguageMode(
+ static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
- while (peek() != Token::EOS) {
- Statement* stat = ParseModuleItem(CHECK_OK);
- if (stat && !stat->IsEmpty()) {
- body->AddStatement(stat, zone());
- }
+ while (peek() != Token::EOS) {
+ Statement* stat = ParseModuleItem(CHECK_OK);
+ if (stat && !stat->IsEmpty()) {
+ body->Add(stat, zone());
}
}
- scope->set_end_position(scanner()->location().end_pos);
- body->set_scope(scope);
-
// Check that all exports are bound.
- ModuleDescriptor* descriptor = scope->module();
+ ModuleDescriptor* descriptor = scope_->module();
for (ModuleDescriptor::Iterator it = descriptor->iterator(); !it.done();
it.Advance()) {
- if (scope->LookupLocal(it.name()) == NULL) {
- ParserTraits::ReportMessage("module_export_undefined", it.name());
+ if (scope_->LookupLocal(it.local_name()) == NULL) {
+ // TODO(adamk): Pass both local_name and export_name once ParserTraits
+ // supports multiple arg error messages.
+ // Also try to report this at a better location.
+ ParserTraits::ReportMessage("module_export_undefined", it.local_name());
*ok = false;
return NULL;
}
}
- scope->module()->Freeze();
- return body;
+ scope_->module()->Freeze();
+ return NULL;
}
-Literal* Parser::ParseModuleSpecifier(bool* ok) {
+const AstRawString* Parser::ParseModuleSpecifier(bool* ok) {
// ModuleSpecifier :
// StringLiteral
- int pos = peek_position();
Expect(Token::STRING, CHECK_OK);
- return factory()->NewStringLiteral(GetSymbol(scanner()), pos);
+ return GetSymbol(scanner());
}
-void* Parser::ParseExportClause(ZoneList<const AstRawString*>* names,
+void* Parser::ParseExportClause(ZoneList<const AstRawString*>* export_names,
+ ZoneList<Scanner::Location>* export_locations,
+ ZoneList<const AstRawString*>* local_names,
Scanner::Location* reserved_loc, bool* ok) {
// ExportClause :
// '{' '}'
@@ -1337,14 +1420,17 @@ void* Parser::ParseExportClause(ZoneList<const AstRawString*>* names,
!Token::IsIdentifier(name_tok, STRICT, false)) {
*reserved_loc = scanner()->location();
}
- const AstRawString* name = ParseIdentifierName(CHECK_OK);
- names->Add(name, zone());
+ const AstRawString* local_name = ParseIdentifierName(CHECK_OK);
const AstRawString* export_name = NULL;
if (CheckContextualKeyword(CStrVector("as"))) {
export_name = ParseIdentifierName(CHECK_OK);
}
- // TODO(ES6): Return the export_name as well as the name.
- USE(export_name);
+ if (export_name == NULL) {
+ export_name = local_name;
+ }
+ export_names->Add(export_name, zone());
+ local_names->Add(local_name, zone());
+ export_locations->Add(scanner()->location(), zone());
if (peek() == Token::RBRACE) break;
Expect(Token::COMMA, CHECK_OK);
}
@@ -1355,8 +1441,7 @@ void* Parser::ParseExportClause(ZoneList<const AstRawString*>* names,
}
-void* Parser::ParseNamedImports(ZoneList<const AstRawString*>* names,
- bool* ok) {
+ZoneList<ImportDeclaration*>* Parser::ParseNamedImports(int pos, bool* ok) {
// NamedImports :
// '{' '}'
// '{' ImportsList '}'
@@ -1372,34 +1457,38 @@ void* Parser::ParseNamedImports(ZoneList<const AstRawString*>* names,
Expect(Token::LBRACE, CHECK_OK);
- Token::Value name_tok;
- while ((name_tok = peek()) != Token::RBRACE) {
- const AstRawString* name = ParseIdentifierName(CHECK_OK);
- const AstRawString* import_name = NULL;
+ ZoneList<ImportDeclaration*>* result =
+ new (zone()) ZoneList<ImportDeclaration*>(1, zone());
+ while (peek() != Token::RBRACE) {
+ const AstRawString* import_name = ParseIdentifierName(CHECK_OK);
+ const AstRawString* local_name = import_name;
// In the presence of 'as', the left-side of the 'as' can
// be any IdentifierName. But without 'as', it must be a valid
- // BindingIdentiifer.
+ // BindingIdentifier.
if (CheckContextualKeyword(CStrVector("as"))) {
- import_name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
- } else if (!Token::IsIdentifier(name_tok, STRICT, false)) {
+ local_name = ParseIdentifierName(CHECK_OK);
+ }
+ if (!Token::IsIdentifier(scanner()->current_token(), STRICT, false)) {
*ok = false;
- ReportMessageAt(scanner()->location(), "unexpected_reserved");
+ ReportMessage("unexpected_reserved");
return NULL;
- } else if (IsEvalOrArguments(name)) {
+ } else if (IsEvalOrArguments(local_name)) {
*ok = false;
- ReportMessageAt(scanner()->location(), "strict_eval_arguments");
+ ReportMessage("strict_eval_arguments");
return NULL;
}
- // TODO(ES6): Return the import_name as well as the name.
- names->Add(name, zone());
- USE(import_name);
+ VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
+ ImportDeclaration* declaration =
+ factory()->NewImportDeclaration(proxy, import_name, NULL, scope_, pos);
+ Declare(declaration, true, CHECK_OK);
+ result->Add(declaration, zone());
if (peek() == Token::RBRACE) break;
Expect(Token::COMMA, CHECK_OK);
}
Expect(Token::RBRACE, CHECK_OK);
- return NULL;
+ return result;
}
@@ -1425,32 +1514,39 @@ Statement* Parser::ParseImportDeclaration(bool* ok) {
// 'import' ModuleSpecifier ';'
if (tok == Token::STRING) {
- ParseModuleSpecifier(CHECK_OK);
+ const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
ExpectSemicolon(CHECK_OK);
+ // TODO(ES6): Add module to the requested modules of scope_->module().
+ USE(module_specifier);
return factory()->NewEmptyStatement(pos);
}
// Parse ImportedDefaultBinding if present.
- const AstRawString* imported_default_binding = NULL;
+ ImportDeclaration* import_default_declaration = NULL;
if (tok != Token::MUL && tok != Token::LBRACE) {
- imported_default_binding =
+ const AstRawString* local_name =
ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
+ VariableProxy* proxy = NewUnresolved(local_name, IMPORT);
+ import_default_declaration = factory()->NewImportDeclaration(
+ proxy, ast_value_factory()->default_string(), NULL, scope_, pos);
+ Declare(import_default_declaration, true, CHECK_OK);
}
const AstRawString* module_instance_binding = NULL;
- ZoneList<const AstRawString*> names(1, zone());
- if (imported_default_binding == NULL || Check(Token::COMMA)) {
+ ZoneList<ImportDeclaration*>* named_declarations = NULL;
+ if (import_default_declaration == NULL || Check(Token::COMMA)) {
switch (peek()) {
case Token::MUL: {
Consume(Token::MUL);
ExpectContextualKeyword(CStrVector("as"), CHECK_OK);
module_instance_binding =
ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
+ // TODO(ES6): Add an appropriate declaration.
break;
}
case Token::LBRACE:
- ParseNamedImports(&names, CHECK_OK);
+ named_declarations = ParseNamedImports(pos, CHECK_OK);
break;
default:
@@ -1461,21 +1557,21 @@ Statement* Parser::ParseImportDeclaration(bool* ok) {
}
ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
- Literal* module = ParseModuleSpecifier(CHECK_OK);
- USE(module);
-
+ const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
ExpectSemicolon(CHECK_OK);
if (module_instance_binding != NULL) {
- // TODO(ES6): Bind name to the Module Instance Object of module.
+ // TODO(ES6): Set the module specifier for the module namespace binding.
}
- if (imported_default_binding != NULL) {
- // TODO(ES6): Add an appropriate declaration.
+ if (import_default_declaration != NULL) {
+ import_default_declaration->set_module_specifier(module_specifier);
}
- for (int i = 0; i < names.length(); ++i) {
- // TODO(ES6): Add an appropriate declaration for each name
+ if (named_declarations != NULL) {
+ for (int i = 0; i < named_declarations->length(); ++i) {
+ named_declarations->at(i)->set_module_specifier(module_specifier);
+ }
}
return factory()->NewEmptyStatement(pos);
@@ -1488,16 +1584,20 @@ Statement* Parser::ParseExportDefault(bool* ok) {
// 'export' 'default' ClassDeclaration
// 'export' 'default' AssignmentExpression[In] ';'
+ Expect(Token::DEFAULT, CHECK_OK);
+ Scanner::Location default_loc = scanner()->location();
+
+ ZoneList<const AstRawString*> names(1, zone());
Statement* result = NULL;
switch (peek()) {
case Token::FUNCTION:
// TODO(ES6): Support parsing anonymous function declarations here.
- result = ParseFunctionDeclaration(NULL, CHECK_OK);
+ result = ParseFunctionDeclaration(&names, CHECK_OK);
break;
case Token::CLASS:
// TODO(ES6): Support parsing anonymous class declarations here.
- result = ParseClassDeclaration(NULL, CHECK_OK);
+ result = ParseClassDeclaration(&names, CHECK_OK);
break;
default: {
@@ -1509,7 +1609,20 @@ Statement* Parser::ParseExportDefault(bool* ok) {
}
}
- // TODO(ES6): Add default export to scope_->module()
+ const AstRawString* default_string = ast_value_factory()->default_string();
+
+ DCHECK_LE(names.length(), 1);
+ if (names.length() == 1) {
+ scope_->module()->AddLocalExport(default_string, names.first(), zone(), ok);
+ if (!*ok) {
+ ParserTraits::ReportMessageAt(default_loc, "duplicate_export",
+ default_string);
+ return NULL;
+ }
+ } else {
+ // TODO(ES6): Assign result to a const binding with the name "*default*"
+ // and add an export entry with "*default*" as the local name.
+ }
return result;
}
@@ -1528,23 +1641,18 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
Statement* result = NULL;
ZoneList<const AstRawString*> names(1, zone());
- bool is_export_from = false;
switch (peek()) {
case Token::DEFAULT:
- Consume(Token::DEFAULT);
return ParseExportDefault(ok);
case Token::MUL: {
Consume(Token::MUL);
ExpectContextualKeyword(CStrVector("from"), CHECK_OK);
- Literal* module = ParseModuleSpecifier(CHECK_OK);
+ const AstRawString* module_specifier = ParseModuleSpecifier(CHECK_OK);
ExpectSemicolon(CHECK_OK);
- // TODO(ES6): Do something with the return value
- // of ParseModuleSpecifier.
- USE(module);
- is_export_from = true;
- result = factory()->NewEmptyStatement(pos);
- break;
+ // TODO(ES6): scope_->module()->AddStarExport(...)
+ USE(module_specifier);
+ return factory()->NewEmptyStatement(pos);
}
case Token::LBRACE: {
@@ -1560,13 +1668,14 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
// encountered, and then throw a SyntaxError if we are in the
// non-FromClause case.
Scanner::Location reserved_loc = Scanner::Location::invalid();
- ParseExportClause(&names, &reserved_loc, CHECK_OK);
+ ZoneList<const AstRawString*> export_names(1, zone());
+ ZoneList<Scanner::Location> export_locations(1, zone());
+ ZoneList<const AstRawString*> local_names(1, zone());
+ ParseExportClause(&export_names, &export_locations, &local_names,
+ &reserved_loc, CHECK_OK);
+ const AstRawString* indirect_export_module_specifier = NULL;
if (CheckContextualKeyword(CStrVector("from"))) {
- Literal* module = ParseModuleSpecifier(CHECK_OK);
- // TODO(ES6): Do something with the return value
- // of ParseModuleSpecifier.
- USE(module);
- is_export_from = true;
+ indirect_export_module_specifier = ParseModuleSpecifier(CHECK_OK);
} else if (reserved_loc.IsValid()) {
// No FromClause, so reserved words are invalid in ExportClause.
*ok = false;
@@ -1574,8 +1683,25 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
return NULL;
}
ExpectSemicolon(CHECK_OK);
- result = factory()->NewEmptyStatement(pos);
- break;
+ const int length = export_names.length();
+ DCHECK_EQ(length, local_names.length());
+ DCHECK_EQ(length, export_locations.length());
+ if (indirect_export_module_specifier == NULL) {
+ for (int i = 0; i < length; ++i) {
+ scope_->module()->AddLocalExport(export_names[i], local_names[i],
+ zone(), ok);
+ if (!*ok) {
+ ParserTraits::ReportMessageAt(export_locations[i],
+ "duplicate_export", export_names[i]);
+ return NULL;
+ }
+ }
+ } else {
+ for (int i = 0; i < length; ++i) {
+ // TODO(ES6): scope_->module()->AddIndirectExport(...);
+ }
+ }
+ return factory()->NewEmptyStatement(pos);
}
case Token::FUNCTION:
@@ -1598,37 +1724,18 @@ Statement* Parser::ParseExportDeclaration(bool* ok) {
return NULL;
}
- // Every export of a module may be assigned.
+ // Extract declared names into export declarations.
+ ModuleDescriptor* descriptor = scope_->module();
for (int i = 0; i < names.length(); ++i) {
- Variable* var = scope_->Lookup(names[i]);
- if (var == NULL) {
- // TODO(sigurds) This is an export that has no definition yet,
- // not clear what to do in this case.
- continue;
- }
- if (!IsImmutableVariableMode(var->mode())) {
- var->set_maybe_assigned();
- }
- }
-
- // TODO(ES6): Handle 'export from' once imports are properly implemented.
- // For now we just drop such exports on the floor.
- if (!is_export_from) {
- // Extract declared names into export declarations and module descriptor.
- ModuleDescriptor* descriptor = scope_->module();
- for (int i = 0; i < names.length(); ++i) {
- // TODO(adamk): Make early errors here provide the right error message
- // (duplicate exported names).
- descriptor->Add(names[i], zone(), CHECK_OK);
- // TODO(rossberg): Rethink whether we actually need to store export
- // declarations (for compilation?).
- // ExportDeclaration* declaration =
- // factory()->NewExportDeclaration(proxy, scope_, position);
- // scope_->AddDeclaration(declaration);
+ descriptor->AddLocalExport(names[i], names[i], zone(), ok);
+ if (!*ok) {
+ // TODO(adamk): Possibly report this error at the right place.
+ ParserTraits::ReportMessage("duplicate_export", names[i]);
+ return NULL;
}
}
- DCHECK(result != NULL);
+ DCHECK_NOT_NULL(result);
return result;
}
@@ -1777,11 +1884,13 @@ VariableProxy* Parser::NewUnresolved(const AstRawString* name,
// scope.
// Let/const variables in harmony mode are always added to the immediately
// enclosing scope.
- return DeclarationScope(mode)->NewUnresolved(factory(), name, position());
+ return DeclarationScope(mode)->NewUnresolved(factory(), name,
+ scanner()->location().beg_pos,
+ scanner()->location().end_pos);
}
-void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
+Variable* Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
VariableProxy* proxy = declaration->proxy();
DCHECK(proxy->raw_name() != NULL);
const AstRawString* name = proxy->raw_name();
@@ -1807,10 +1916,14 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
if (var == NULL) {
// Declare the name.
var = declaration_scope->DeclareLocal(
- name, mode, declaration->initialization(), kNotAssigned);
- } else if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode())
- || ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
- !declaration_scope->is_script_scope())) {
+ name, mode, declaration->initialization(),
+ declaration->IsFunctionDeclaration() ? Variable::FUNCTION
+ : Variable::NORMAL,
+ kNotAssigned);
+ } else if (IsLexicalVariableMode(mode) ||
+ IsLexicalVariableMode(var->mode()) ||
+ ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
+ !declaration_scope->is_script_scope())) {
// The name was declared in this scope before; check for conflicting
// re-declarations. We have a conflict if either of the declarations is
// not a var (in script scope, we also have to ignore legacy const for
@@ -1825,12 +1938,12 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// because the var declaration is hoisted to the function scope where 'x'
// is already bound.
DCHECK(IsDeclaredVariableMode(var->mode()));
- if (allow_harmony_scoping() && is_strict(language_mode())) {
+ if (is_strict(language_mode())) {
// In harmony we treat re-declarations as early errors. See
// ES5 16 for a definition of early errors.
ParserTraits::ReportMessage("var_redeclaration", name);
*ok = false;
- return;
+ return nullptr;
}
Expression* expression = NewThrowTypeError(
"var_redeclaration", name, declaration->position());
@@ -1862,7 +1975,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// For global const variables we bind the proxy to a variable.
DCHECK(resolve); // should be set by all callers
Variable::Kind kind = Variable::NORMAL;
- var = new (zone()) Variable(declaration_scope, name, mode, true, kind,
+ var = new (zone()) Variable(declaration_scope, name, mode, kind,
kNeedsInitialization, kNotAssigned);
} else if (declaration_scope->is_eval_scope() &&
is_sloppy(declaration_scope->language_mode())) {
@@ -1871,7 +1984,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
// DeclareLookupSlot runtime function.
Variable::Kind kind = Variable::NORMAL;
// TODO(sigurds) figure out if kNotAssigned is OK here
- var = new (zone()) Variable(declaration_scope, name, mode, true, kind,
+ var = new (zone()) Variable(declaration_scope, name, mode, kind,
declaration->initialization(), kNotAssigned);
var->AllocateTo(Variable::LOOKUP, -1);
resolve = true;
@@ -1904,6 +2017,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
if (resolve && var != NULL) {
proxy->BindTo(var);
}
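  // Note (illustrative): returning the declared Variable lets callers act on
  // it directly instead of going back through the proxy, e.g. in
  // ParseVariableDeclarations below:
  //
  //   Variable* var = Declare(declaration, mode != VAR, CHECK_OK);
  //   var->set_initializer_position(position());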
+ return var;
}
@@ -1975,12 +2089,13 @@ Statement* Parser::ParseFunctionDeclaration(
// In ES6, a function behaves as a lexical binding, except in
// a script scope, or the initial scope of eval or another function.
VariableMode mode =
- is_strong(language_mode()) ? CONST :
- allow_harmony_scoping() && is_strict(language_mode()) &&
- !(scope_->is_script_scope() || scope_->is_eval_scope() ||
- scope_->is_function_scope())
- ? LET
- : VAR;
+ is_strong(language_mode())
+ ? CONST
+ : is_strict(language_mode()) &&
+ !(scope_->is_script_scope() || scope_->is_eval_scope() ||
+ scope_->is_function_scope())
+ ? LET
+ : VAR;
VariableProxy* proxy = NewUnresolved(name, mode);
Declaration* declaration =
factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
@@ -2024,7 +2139,7 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
Declare(declaration, true, CHECK_OK);
- proxy->var()->set_initializer_position(pos);
+ proxy->var()->set_initializer_position(position());
Token::Value init_op =
is_strong(language_mode()) ? Token::INIT_CONST : Token::INIT_LET;
@@ -2037,7 +2152,7 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
- if (allow_harmony_scoping() && is_strict(language_mode())) {
+ if (is_strict(language_mode())) {
return ParseScopedBlock(labels, ok);
}
@@ -2159,19 +2274,12 @@ Block* Parser::ParseVariableDeclarations(
init_op = Token::INIT_CONST_LEGACY;
} else {
DCHECK(var_context != kStatement);
- // In ES5 const is not allowed in strict mode.
- if (!allow_harmony_scoping()) {
- ReportMessage("strict_const");
- *ok = false;
- return NULL;
- }
mode = CONST;
init_op = Token::INIT_CONST;
}
is_const = true;
needs_init = true;
} else if (peek() == Token::LET && is_strict(language_mode())) {
- DCHECK(allow_harmony_scoping());
Consume(Token::LET);
DCHECK(var_context != kStatement);
mode = LET;
@@ -2233,7 +2341,9 @@ Block* Parser::ParseVariableDeclarations(
VariableProxy* proxy = NewUnresolved(name, mode);
Declaration* declaration =
factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
- Declare(declaration, mode != VAR, CHECK_OK);
+ Variable* var = Declare(declaration, mode != VAR, CHECK_OK);
+ DCHECK_NOT_NULL(var);
+ DCHECK(!proxy->is_resolved() || proxy->var() == var);
nvars++;
if (declaration_scope->num_var_or_const() > kMaxNumFunctionLocals) {
ReportMessage("too_many_variables");
@@ -2287,11 +2397,11 @@ Block* Parser::ParseVariableDeclarations(
fni_->RemoveLastFunction();
}
if (decl_props != NULL) *decl_props = kHasInitializers;
- }
-
- // Record the end position of the initializer.
- if (proxy->is_resolved()) {
- proxy->var()->set_initializer_position(position());
+ // End position of the initializer is after the assignment expression.
+ var->set_initializer_position(scanner()->location().end_pos);
+ } else {
+ // End position of the initializer is after the variable.
+ var->set_initializer_position(position());
}
// Make sure that 'const x' and 'let x' initialize 'x' to undefined.
@@ -2610,6 +2720,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
Scanner::Location loc = scanner()->location();
+ function_state_->set_return_location(loc);
Token::Value tok = peek();
Statement* result;
@@ -2624,6 +2735,14 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
return_value = GetLiteralUndefined(position());
}
} else {
+ if (is_strong(language_mode()) &&
+ i::IsConstructor(function_state_->kind())) {
+ int pos = peek_position();
+ ReportMessageAt(Scanner::Location(pos, pos + 1),
+ "strong_constructor_return_value");
+ *ok = false;
+ return NULL;
+ }
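      // Illustrative: with this check, in strong mode
      //
      //   class C { constructor() { return 1; } }
      //
      // is rejected, while a bare `return;` (taken on the branch above) is
      // still allowed.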
return_value = ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -2680,8 +2799,8 @@ Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
// CaseClause ::
- // 'case' Expression ':' Statement*
- // 'default' ':' Statement*
+ // 'case' Expression ':' StatementList
+ // 'default' ':' StatementList
Expression* label = NULL; // NULL expression indicates default case
if (peek() == Token::CASE) {
@@ -2703,7 +2822,7 @@ CaseClause* Parser::ParseCaseClause(bool* default_seen_ptr, bool* ok) {
while (peek() != Token::CASE &&
peek() != Token::DEFAULT &&
peek() != Token::RBRACE) {
- Statement* stat = ParseStatement(NULL, CHECK_OK);
+ Statement* stat = ParseStatementListItem(CHECK_OK);
statements->Add(stat, zone());
}
@@ -2796,7 +2915,8 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
Expect(Token::RPAREN, CHECK_OK);
- catch_variable = catch_scope->DeclareLocal(name, VAR, kCreatedInitialized);
+ catch_variable = catch_scope->DeclareLocal(name, VAR, kCreatedInitialized,
+ Variable::NORMAL);
BlockState block_state(&scope_, catch_scope);
catch_block = ParseBlock(NULL, CHECK_OK);
@@ -2910,25 +3030,51 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
Expression* result_done;
Expression* assign_each;
- // var iterator = subject[Symbol.iterator]();
+ // iterator = subject[Symbol.iterator]()
assign_iterator = factory()->NewAssignment(
Token::ASSIGN, factory()->NewVariableProxy(iterator),
GetIterator(subject, factory()), subject->position());
- // var result = iterator.next();
+ // !%_IsSpecObject(result = iterator.next()) &&
+ // %ThrowIteratorResultNotAnObject(result)
{
+ // result = iterator.next()
Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
Expression* next_literal = factory()->NewStringLiteral(
ast_value_factory()->next_string(), RelocInfo::kNoPosition);
Expression* next_property = factory()->NewProperty(
iterator_proxy, next_literal, RelocInfo::kNoPosition);
ZoneList<Expression*>* next_arguments =
- new(zone()) ZoneList<Expression*>(0, zone());
+ new (zone()) ZoneList<Expression*>(0, zone());
Expression* next_call = factory()->NewCall(next_property, next_arguments,
subject->position());
Expression* result_proxy = factory()->NewVariableProxy(result);
next_result = factory()->NewAssignment(Token::ASSIGN, result_proxy,
next_call, subject->position());
+
+ // %_IsSpecObject(...)
+ ZoneList<Expression*>* is_spec_object_args =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ is_spec_object_args->Add(next_result, zone());
+ Expression* is_spec_object_call = factory()->NewCallRuntime(
+ ast_value_factory()->is_spec_object_string(),
+ Runtime::FunctionForId(Runtime::kInlineIsSpecObject),
+ is_spec_object_args, subject->position());
+
+ // %ThrowIteratorResultNotAnObject(result)
+ Expression* result_proxy_again = factory()->NewVariableProxy(result);
+ ZoneList<Expression*>* throw_arguments =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ throw_arguments->Add(result_proxy_again, zone());
+ Expression* throw_call = factory()->NewCallRuntime(
+ ast_value_factory()->throw_iterator_result_not_an_object_string(),
+ Runtime::FunctionForId(Runtime::kThrowIteratorResultNotAnObject),
+ throw_arguments, subject->position());
+
+ next_result = factory()->NewBinaryOperation(
+ Token::AND, factory()->NewUnaryOperation(
+ Token::NOT, is_spec_object_call, subject->position()),
+ throw_call, subject->position());
}
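  // Taken together, the pieces above drive the loop roughly as follows
  // (sketch only; `iterator` and `result` are internal temporaries):
  //
  //   iterator = subject[Symbol.iterator]();
  //   while (true) {
  //     if (!%_IsSpecObject(result = iterator.next()))
  //       %ThrowIteratorResultNotAnObject(result);
  //     if (result.done) break;
  //     each = result.value;
  //     <body>
  //   }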
// result.done
@@ -2962,8 +3108,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
}
-Statement* Parser::DesugarLetBindingsInForStatement(
- Scope* inner_scope, ZoneList<const AstRawString*>* names,
+Statement* Parser::DesugarLexicalBindingsInForStatement(
+ Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
Statement* body, bool* ok) {
// ES6 13.6.3.4 specifies that on each loop iteration the let variables are
@@ -2973,16 +3119,16 @@ Statement* Parser::DesugarLetBindingsInForStatement(
//
// We rewrite a for statement of the form
//
- // labels: for (let x = i; cond; next) body
+ // labels: for (let/const x = i; cond; next) body
//
// into
//
// {
- // let x = i;
+ // let/const x = i;
// temp_x = x;
// first = 1;
// outer: for (;;) {
- // let x = temp_x;
+ // let/const x = temp_x;
// if (first == 1) {
// first = 0;
// } else {
@@ -3009,12 +3155,12 @@ Statement* Parser::DesugarLetBindingsInForStatement(
Block* outer_block = factory()->NewBlock(NULL, names->length() + 3, false,
RelocInfo::kNoPosition);
- // Add statement: let x = i.
+ // Add statement: let/const x = i.
outer_block->AddStatement(init, zone());
const AstRawString* temp_name = ast_value_factory()->dot_for_string();
- // For each let variable x:
+ // For each lexical variable x:
// make statement: temp_x = x.
for (int i = 0; i < names->length(); i++) {
VariableProxy* proxy = NewUnresolved(names->at(i), LET);
@@ -3055,23 +3201,24 @@ Statement* Parser::DesugarLetBindingsInForStatement(
Block* inner_block = factory()->NewBlock(NULL, names->length() + 4, false,
RelocInfo::kNoPosition);
- int pos = scanner()->location().beg_pos;
ZoneList<Variable*> inner_vars(names->length(), zone());
// For each let variable x:
- // make statement: let x = temp_x.
+ // make statement: let/const x = temp_x.
+ VariableMode mode = is_const ? CONST : LET;
for (int i = 0; i < names->length(); i++) {
- VariableProxy* proxy = NewUnresolved(names->at(i), LET);
- Declaration* declaration =
- factory()->NewVariableDeclaration(proxy, LET, scope_, pos);
+ VariableProxy* proxy = NewUnresolved(names->at(i), mode);
+ Declaration* declaration = factory()->NewVariableDeclaration(
+ proxy, mode, scope_, RelocInfo::kNoPosition);
Declare(declaration, true, CHECK_OK);
inner_vars.Add(declaration->proxy()->var(), zone());
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
- Assignment* assignment = factory()->NewAssignment(
- Token::INIT_LET, proxy, temp_proxy, pos);
- Statement* assignment_statement = factory()->NewExpressionStatement(
- assignment, pos);
- proxy->var()->set_initializer_position(pos);
+ Assignment* assignment =
+ factory()->NewAssignment(is_const ? Token::INIT_CONST : Token::INIT_LET,
+ proxy, temp_proxy, RelocInfo::kNoPosition);
+ Statement* assignment_statement =
+ factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+ proxy->var()->set_initializer_position(init->position());
inner_block->AddStatement(assignment_statement, zone());
}
@@ -3083,8 +3230,8 @@ Statement* Parser::DesugarLetBindingsInForStatement(
{
Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
VariableProxy* first_proxy = factory()->NewVariableProxy(first);
- compare =
- factory()->NewCompareOperation(Token::EQ, first_proxy, const1, pos);
+ compare = factory()->NewCompareOperation(Token::EQ, first_proxy, const1,
+ RelocInfo::kNoPosition);
}
Statement* clear_first = NULL;
// Make statement: first = 0.
@@ -3096,8 +3243,8 @@ Statement* Parser::DesugarLetBindingsInForStatement(
clear_first =
factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
}
- Statement* clear_first_or_next = factory()->NewIfStatement(
- compare, clear_first, next, RelocInfo::kNoPosition);
+ Statement* clear_first_or_next =
+ factory()->NewIfStatement(compare, clear_first, next, next->position());
inner_block->AddStatement(clear_first_or_next, zone());
}
@@ -3118,8 +3265,8 @@ Statement* Parser::DesugarLetBindingsInForStatement(
{
Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
- flag_cond =
- factory()->NewCompareOperation(Token::EQ, flag_proxy, const1, pos);
+ flag_cond = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
+ RelocInfo::kNoPosition);
}
// Create chain of expressions "flag = 0, temp_x = x, ..."
@@ -3135,9 +3282,11 @@ Statement* Parser::DesugarLetBindingsInForStatement(
}
// Make the comma-separated list of temp_x = x assignments.
+ int inner_var_proxy_pos = scanner()->location().beg_pos;
for (int i = 0; i < names->length(); i++) {
VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
- VariableProxy* proxy = factory()->NewVariableProxy(inner_vars.at(i), pos);
+ VariableProxy* proxy =
+ factory()->NewVariableProxy(inner_vars.at(i), inner_var_proxy_pos);
Assignment* assignment = factory()->NewAssignment(
Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
compound_next = factory()->NewBinaryOperation(
@@ -3171,8 +3320,8 @@ Statement* Parser::DesugarLetBindingsInForStatement(
{
Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
- compare =
- factory()->NewCompareOperation(Token::EQ, flag_proxy, const1, pos);
+ compare = factory()->NewCompareOperation(Token::EQ, flag_proxy, const1,
+ RelocInfo::kNoPosition);
}
Statement* stop =
factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
@@ -3197,8 +3346,9 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// 'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
int stmt_pos = peek_position();
+ bool is_const = false;
Statement* init = NULL;
- ZoneList<const AstRawString*> let_bindings(1, zone());
+ ZoneList<const AstRawString*> lexical_bindings(1, zone());
// Create an in-between scope for let-bound iteration variables.
Scope* saved_scope = scope_;
@@ -3219,7 +3369,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
CHECK_OK);
bool accept_OF = decl_props == kHasNoInitializers;
ForEachStatement::VisitMode mode;
- int each_pos = position();
+ int each_beg_pos = scanner()->location().beg_pos;
+ int each_end_pos = scanner()->location().end_pos;
if (name != NULL && CheckInOrOf(accept_OF, &mode, ok)) {
if (!*ok) return nullptr;
@@ -3230,7 +3381,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Expression* enumerable = ParseExpression(true, CHECK_OK);
Expect(Token::RPAREN, CHECK_OK);
- VariableProxy* each = scope_->NewUnresolved(factory(), name, each_pos);
+ VariableProxy* each =
+ scope_->NewUnresolved(factory(), name, each_beg_pos, each_end_pos);
Statement* body = ParseSubStatement(NULL, CHECK_OK);
InitializeForEachStatement(loop, each, enumerable, body);
Block* result =
@@ -3248,16 +3400,17 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
}
} else if ((peek() == Token::LET || peek() == Token::CONST) &&
is_strict(language_mode())) {
- bool is_const = peek() == Token::CONST;
+ is_const = peek() == Token::CONST;
const AstRawString* name = NULL;
VariableDeclarationProperties decl_props = kHasNoInitializers;
Block* variable_statement =
- ParseVariableDeclarations(kForStatement, &decl_props, &let_bindings,
- &name, CHECK_OK);
+ ParseVariableDeclarations(kForStatement, &decl_props,
+ &lexical_bindings, &name, CHECK_OK);
bool accept_IN = name != NULL && decl_props != kHasInitializers;
bool accept_OF = decl_props == kHasNoInitializers;
ForEachStatement::VisitMode mode;
- int each_pos = position();
+ int each_beg_pos = scanner()->location().beg_pos;
+ int each_end_pos = scanner()->location().end_pos;
if (accept_IN && CheckInOrOf(accept_OF, &mode, ok)) {
if (!*ok) return nullptr;
@@ -3279,7 +3432,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// implementing stack allocated block scoped variables.
Variable* temp = scope_->DeclarationScope()->NewTemporary(
ast_value_factory()->dot_for_string());
- VariableProxy* temp_proxy = factory()->NewVariableProxy(temp, each_pos);
+ VariableProxy* temp_proxy =
+ factory()->NewVariableProxy(temp, each_beg_pos, each_end_pos);
ForEachStatement* loop =
factory()->NewForEachStatement(mode, labels, stmt_pos);
Target target(&this->target_stack_, loop);
@@ -3290,7 +3444,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
scope_ = for_scope;
Expect(Token::RPAREN, CHECK_OK);
- VariableProxy* each = scope_->NewUnresolved(factory(), name, each_pos);
+ VariableProxy* each =
+ scope_->NewUnresolved(factory(), name, each_beg_pos, each_end_pos);
Statement* body = ParseSubStatement(NULL, CHECK_OK);
Block* body_block =
factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
@@ -3367,7 +3522,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
// If there are let bindings, then condition and the next statement of the
// for loop must be parsed in a new scope.
Scope* inner_scope = NULL;
- if (let_bindings.length() > 0) {
+ if (lexical_bindings.length() > 0) {
inner_scope = NewScope(for_scope, BLOCK_SCOPE);
inner_scope->set_start_position(scanner()->location().beg_pos);
scope_ = inner_scope;
@@ -3390,10 +3545,11 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
Statement* body = ParseSubStatement(NULL, CHECK_OK);
Statement* result = NULL;
- if (let_bindings.length() > 0) {
+ if (lexical_bindings.length() > 0) {
scope_ = for_scope;
- result = DesugarLetBindingsInForStatement(inner_scope, &let_bindings, loop,
- init, cond, next, body, CHECK_OK);
+ result = DesugarLexicalBindingsInForStatement(
+ inner_scope, is_const, &lexical_bindings, loop, init, cond,
+ next, body, CHECK_OK);
scope_ = saved_scope;
for_scope->set_end_position(scanner()->location().end_pos);
} else {
@@ -3512,6 +3668,10 @@ bool CheckAndDeclareArrowParameter(ParserTraits* traits, Expression* expression,
return false;
}
+ // When the variable was seen, it was recorded as unresolved in the outer
+ // scope. But it's really not unresolved.
+ scope->outer_scope()->RemoveUnresolved(expression->AsVariableProxy());
+
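  // Illustrative: while parsing `(a, b) => a`, the parenthesized `a` and `b`
  // are first parsed as an ordinary expression, so their proxies end up as
  // unresolved references in the enclosing scope; they have to be removed
  // there before being redeclared as parameters of the arrow function scope.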
scope->DeclareParameter(raw_name, VAR);
++(*num_params);
return true;
@@ -3609,13 +3769,12 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// nested function, and hoisting works normally relative to that.
Scope* declaration_scope = scope_->DeclarationScope();
Scope* original_declaration_scope = original_scope_->DeclarationScope();
- Scope* scope =
- function_type == FunctionLiteral::DECLARATION &&
- (!allow_harmony_scoping() || is_sloppy(language_mode())) &&
- (original_scope_ == original_declaration_scope ||
- declaration_scope != original_declaration_scope)
- ? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
- : NewScope(scope_, FUNCTION_SCOPE, kind);
+ Scope* scope = function_type == FunctionLiteral::DECLARATION &&
+ is_sloppy(language_mode()) &&
+ (original_scope_ == original_declaration_scope ||
+ declaration_scope != original_declaration_scope)
+ ? NewScope(declaration_scope, FUNCTION_SCOPE, kind)
+ : NewScope(scope_, FUNCTION_SCOPE, kind);
ZoneList<Statement*>* body = NULL;
int materialized_literal_count = -1;
int expected_property_count = -1;
@@ -3723,16 +3882,15 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
Variable* fvar = NULL;
Token::Value fvar_init_op = Token::INIT_CONST_LEGACY;
if (function_type == FunctionLiteral::NAMED_EXPRESSION) {
- if (allow_harmony_scoping() && is_strict(language_mode())) {
+ if (is_strict(language_mode())) {
fvar_init_op = Token::INIT_CONST;
}
VariableMode fvar_mode =
- allow_harmony_scoping() && is_strict(language_mode()) ? CONST
- : CONST_LEGACY;
+ is_strict(language_mode()) ? CONST : CONST_LEGACY;
DCHECK(function_name != NULL);
fvar = new (zone())
- Variable(scope_, function_name, fvar_mode, true /* is valid LHS */,
- Variable::NORMAL, kCreatedInitialized, kNotAssigned);
+ Variable(scope_, function_name, fvar_mode, Variable::NORMAL,
+ kCreatedInitialized, kNotAssigned);
VariableProxy* proxy = factory()->NewVariableProxy(fvar);
VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
@@ -3802,9 +3960,17 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
CHECK_OK);
}
- if (allow_harmony_scoping() && is_strict(language_mode())) {
+ if (is_strict(language_mode())) {
CheckConflictingVarDeclarations(scope, CHECK_OK);
}
+ if (is_strong(language_mode()) && IsSubclassConstructor(kind)) {
+ if (!function_state.super_call_location().IsValid()) {
+ ReportMessageAt(function_name_location, "strong_super_call_missing",
+ kReferenceError);
+ *ok = false;
+ return nullptr;
+ }
+ }
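  // Illustrative: in strong mode a derived-class constructor must call
  // super(), so
  //
  //   class D extends B { constructor() {} }
  //
  // is rejected here with "strong_super_call_missing".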
}
FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
@@ -3870,7 +4036,7 @@ void Parser::SkipLazyFunctionBody(const AstRawString* function_name,
if (logger.has_error()) {
ParserTraits::ReportMessageAt(
Scanner::Location(logger.start(), logger.end()), logger.message(),
- logger.argument_opt(), logger.is_reference_error());
+ logger.argument_opt(), logger.error_type());
*ok = false;
return;
}
@@ -4002,7 +4168,6 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
NULL, stack_limit_);
reusable_preparser_->set_allow_lazy(true);
reusable_preparser_->set_allow_natives(allow_natives());
- reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
reusable_preparser_->set_allow_harmony_modules(allow_harmony_modules());
reusable_preparser_->set_allow_harmony_arrow_functions(
allow_harmony_arrow_functions());
@@ -4011,7 +4176,6 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
reusable_preparser_->set_allow_harmony_classes(allow_harmony_classes());
reusable_preparser_->set_allow_harmony_object_literals(
allow_harmony_object_literals());
- reusable_preparser_->set_allow_harmony_templates(allow_harmony_templates());
reusable_preparser_->set_allow_harmony_sloppy(allow_harmony_sloppy());
reusable_preparser_->set_allow_harmony_unicode(allow_harmony_unicode());
reusable_preparser_->set_allow_harmony_computed_property_names(
@@ -4045,7 +4209,11 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
return NULL;
}
+ // Create a block scope which is additionally tagged as class scope; this is
+  // important for resolving variable references to the class name in strong
+  // mode.
Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+ block_scope->tag_as_class_scope();
BlockState block_state(&scope_, block_scope);
scope_->SetLanguageMode(
static_cast<LanguageMode>(scope_->language_mode() | STRICT_BIT));
@@ -4074,6 +4242,7 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
bool has_seen_constructor = false;
Expect(Token::LBRACE, CHECK_OK);
+
const bool has_extends = extends != nullptr;
while (peek() != Token::RBRACE) {
if (Check(Token::SEMICOLON)) continue;
@@ -4108,12 +4277,14 @@ ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
}
block_scope->set_end_position(end_pos);
- block_scope = block_scope->FinalizeBlockScope();
if (name != NULL) {
DCHECK_NOT_NULL(proxy);
- DCHECK_NOT_NULL(block_scope);
proxy->var()->set_initializer_position(end_pos);
+ } else {
+ // Unnamed classes should not have scopes (the scope will be empty).
+ DCHECK_EQ(block_scope->num_var_or_const(), 0);
+ block_scope = nullptr;
}
return factory()->NewClassLiteral(name, block_scope, proxy, extends,
@@ -4239,79 +4410,30 @@ IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
}
-void Parser::HandleSourceURLComments(CompilationInfo* info) {
+void Parser::HandleSourceURLComments(Isolate* isolate, Handle<Script> script) {
if (scanner_.source_url()->length() > 0) {
- Handle<String> source_url =
- scanner_.source_url()->Internalize(info->isolate());
- info->script()->set_source_url(*source_url);
+ Handle<String> source_url = scanner_.source_url()->Internalize(isolate);
+ script->set_source_url(*source_url);
}
if (scanner_.source_mapping_url()->length() > 0) {
Handle<String> source_mapping_url =
- scanner_.source_mapping_url()->Internalize(info->isolate());
- info->script()->set_source_mapping_url(*source_mapping_url);
+ scanner_.source_mapping_url()->Internalize(isolate);
+ script->set_source_mapping_url(*source_mapping_url);
}
}
-void Parser::ThrowPendingError(Isolate* isolate, Handle<Script> script) {
- DCHECK(ast_value_factory()->IsInternalized());
- if (has_pending_error_) {
- MessageLocation location(script, pending_error_location_.beg_pos,
- pending_error_location_.end_pos);
- Factory* factory = isolate->factory();
- bool has_arg =
- pending_error_arg_ != NULL || pending_error_char_arg_ != NULL;
- Handle<FixedArray> elements = factory->NewFixedArray(has_arg ? 1 : 0);
- if (pending_error_arg_ != NULL) {
- Handle<String> arg_string = pending_error_arg_->string();
- elements->set(0, *arg_string);
- } else if (pending_error_char_arg_ != NULL) {
- Handle<String> arg_string =
- factory->NewStringFromUtf8(CStrVector(pending_error_char_arg_))
- .ToHandleChecked();
- elements->set(0, *arg_string);
- }
- isolate->debug()->OnCompileError(script);
-
- Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
- Handle<Object> error;
- MaybeHandle<Object> maybe_error =
- pending_error_is_reference_error_
- ? factory->NewReferenceError(pending_error_message_, array)
- : factory->NewSyntaxError(pending_error_message_, array);
-
- if (maybe_error.ToHandle(&error)) {
- Handle<JSObject> jserror = Handle<JSObject>::cast(error);
-
- Handle<Name> key_start_pos = factory->error_start_pos_symbol();
- JSObject::SetProperty(jserror, key_start_pos,
- handle(Smi::FromInt(location.start_pos()), isolate),
- SLOPPY).Check();
-
- Handle<Name> key_end_pos = factory->error_end_pos_symbol();
- JSObject::SetProperty(jserror, key_end_pos,
- handle(Smi::FromInt(location.end_pos()), isolate),
- SLOPPY).Check();
-
- Handle<Name> key_script = factory->error_script_symbol();
- JSObject::SetProperty(jserror, key_script, script, SLOPPY).Check();
-
- isolate->Throw(*error, &location);
- }
- }
-}
-
-
-void Parser::Internalize(CompilationInfo* info) {
+void Parser::Internalize(Isolate* isolate, Handle<Script> script, bool error) {
// Internalize strings.
- ast_value_factory()->Internalize(info->isolate());
+ ast_value_factory()->Internalize(isolate);
// Error processing.
- if (info->function() == NULL) {
+ if (error) {
if (stack_overflow()) {
- info->isolate()->StackOverflow();
+ isolate->StackOverflow();
} else {
- ThrowPendingError(info->isolate(), info->script());
+ DCHECK(pending_error_handler_.has_pending_error());
+ pending_error_handler_.ThrowPendingError(isolate, script);
}
}
@@ -4319,10 +4441,10 @@ void Parser::Internalize(CompilationInfo* info) {
for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
++feature) {
for (int i = 0; i < use_counts_[feature]; ++i) {
- info->isolate()->CountUsage(v8::Isolate::UseCounterFeature(feature));
+ isolate->CountUsage(v8::Isolate::UseCounterFeature(feature));
}
}
- info->isolate()->counters()->total_preparse_skipped()->Increment(
+ isolate->counters()->total_preparse_skipped()->Increment(
total_preparse_skipped_);
}
@@ -5240,20 +5362,17 @@ bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
}
-bool Parser::ParseStatic(CompilationInfo* info, bool allow_lazy) {
- Parser parser(info, info->isolate()->stack_guard()->real_climit(),
- info->isolate()->heap()->HashSeed(),
- info->isolate()->unicode_cache());
- parser.set_allow_lazy(allow_lazy);
+bool Parser::ParseStatic(ParseInfo* info) {
+ Parser parser(info);
if (parser.Parse(info)) {
- info->SetLanguageMode(info->function()->language_mode());
+ info->set_language_mode(info->function()->language_mode());
return true;
}
return false;
}
-bool Parser::Parse(CompilationInfo* info) {
+bool Parser::Parse(ParseInfo* info) {
DCHECK(info->function() == NULL);
FunctionLiteral* result = NULL;
// Ok to use Isolate here; this function is only called in the main thread.
@@ -5270,23 +5389,23 @@ bool Parser::Parse(CompilationInfo* info) {
if (info->is_lazy()) {
DCHECK(!info->is_eval());
if (info->shared_info()->is_function()) {
- result = ParseLazy(info);
+ result = ParseLazy(isolate, info);
} else {
- result = ParseProgram(info);
+ result = ParseProgram(isolate, info);
}
} else {
SetCachedData(info);
- result = ParseProgram(info);
+ result = ParseProgram(isolate, info);
}
- info->SetFunction(result);
+ info->set_literal(result);
- Internalize(info);
+ Internalize(isolate, info->script(), result == NULL);
DCHECK(ast_value_factory()->IsInternalized());
return (result != NULL);
}
-void Parser::ParseOnBackground(CompilationInfo* info) {
+void Parser::ParseOnBackground(ParseInfo* info) {
parsing_on_main_thread_ = false;
DCHECK(info->function() == NULL);
@@ -5317,7 +5436,7 @@ void Parser::ParseOnBackground(CompilationInfo* info) {
eval_scope->set_end_position(scanner()->location().end_pos);
}
- info->SetFunction(result);
+ info->set_literal(result);
// We cannot internalize on a background thread; a foreground task will take
// care of calling Parser::Internalize just before compilation.
@@ -5363,27 +5482,24 @@ Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
if (!tag) {
// Build tree of BinaryOps to simplify code-generation
- Expression* expr = NULL;
+ Expression* expr = cooked_strings->at(0);
+ int i = 0;
+ while (i < expressions->length()) {
+ Expression* sub = expressions->at(i++);
+ Expression* cooked_str = cooked_strings->at(i);
+
+ // Let middle be ToString(sub).
+ ZoneList<Expression*>* args =
+ new (zone()) ZoneList<Expression*>(1, zone());
+ args->Add(sub, zone());
+ Expression* middle = factory()->NewCallRuntime(
+ ast_value_factory()->to_string_string(), NULL, args,
+ sub->position());
- if (expressions->length() == 0) {
- // Simple case: treat as string literal
- expr = cooked_strings->at(0);
- } else {
- int i;
- Expression* cooked_str = cooked_strings->at(0);
expr = factory()->NewBinaryOperation(
- Token::ADD, cooked_str, expressions->at(0), cooked_str->position());
- for (i = 1; i < expressions->length(); ++i) {
- cooked_str = cooked_strings->at(i);
- expr = factory()->NewBinaryOperation(
- Token::ADD, expr, factory()->NewBinaryOperation(
- Token::ADD, cooked_str, expressions->at(i),
- cooked_str->position()),
- cooked_str->position());
- }
- cooked_str = cooked_strings->at(i);
- expr = factory()->NewBinaryOperation(Token::ADD, expr, cooked_str,
- cooked_str->position());
+ Token::ADD, factory()->NewBinaryOperation(
+ Token::ADD, expr, middle, expr->position()),
+ cooked_str, sub->position());
}
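    // Sketch: for an untagged `a${b}c` this builds the left-leaning tree
    //   (("a" + %ToString(b)) + "c")
    // with one ADD per substitution and per following cooked string.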
return expr;
} else {
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 713133a679..d93faaf0e1 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -7,22 +7,205 @@
#include "src/allocation.h"
#include "src/ast.h"
-#include "src/compiler.h" // For CachedDataMode
+#include "src/compiler.h" // TODO(titzer): remove this include dependency
+#include "src/pending-compilation-error-handler.h"
#include "src/preparse-data.h"
#include "src/preparse-data-format.h"
#include "src/preparser.h"
#include "src/scopes.h"
namespace v8 {
+
class ScriptCompiler;
namespace internal {
-class CompilationInfo;
-class ParserLog;
-class PositionStack;
class Target;
+// A container for the inputs, configuration options, and outputs of parsing.
+class ParseInfo {
+ public:
+ explicit ParseInfo(Zone* zone);
+ ParseInfo(Zone* zone, Handle<JSFunction> function);
+ ParseInfo(Zone* zone, Handle<Script> script);
+ // TODO(all) Only used via Debug::FindSharedFunctionInfoInScript, remove?
+ ParseInfo(Zone* zone, Handle<SharedFunctionInfo> shared);
+
+ ~ParseInfo() {
+ if (ast_value_factory_owned()) {
+ delete ast_value_factory_;
+ set_ast_value_factory_owned(false);
+ }
+ ast_value_factory_ = nullptr;
+ }
+
+ Zone* zone() { return zone_; }
+
+// Convenience accessor methods for flags.
+#define FLAG_ACCESSOR(flag, getter, setter) \
+ bool getter() const { return GetFlag(flag); } \
+ void setter() { SetFlag(flag); } \
+ void setter(bool val) { SetFlag(flag, val); }
+
+ FLAG_ACCESSOR(kToplevel, is_toplevel, set_toplevel)
+ FLAG_ACCESSOR(kLazy, is_lazy, set_lazy)
+ FLAG_ACCESSOR(kEval, is_eval, set_eval)
+ FLAG_ACCESSOR(kGlobal, is_global, set_global)
+ FLAG_ACCESSOR(kStrictMode, is_strict_mode, set_strict_mode)
+ FLAG_ACCESSOR(kStrongMode, is_strong_mode, set_strong_mode)
+ FLAG_ACCESSOR(kNative, is_native, set_native)
+ FLAG_ACCESSOR(kModule, is_module, set_module)
+ FLAG_ACCESSOR(kAllowLazyParsing, allow_lazy_parsing, set_allow_lazy_parsing)
+ FLAG_ACCESSOR(kAstValueFactoryOwned, ast_value_factory_owned,
+ set_ast_value_factory_owned)
+
+#undef FLAG_ACCESSOR
+
+ void set_parse_restriction(ParseRestriction restriction) {
+ SetFlag(kParseRestriction, restriction != NO_PARSE_RESTRICTION);
+ }
+
+ ParseRestriction parse_restriction() const {
+ return GetFlag(kParseRestriction) ? ONLY_SINGLE_FUNCTION_LITERAL
+ : NO_PARSE_RESTRICTION;
+ }
+
+ ScriptCompiler::ExternalSourceStream* source_stream() {
+ return source_stream_;
+ }
+ void set_source_stream(ScriptCompiler::ExternalSourceStream* source_stream) {
+ source_stream_ = source_stream;
+ }
+
+ ScriptCompiler::StreamedSource::Encoding source_stream_encoding() {
+ return source_stream_encoding_;
+ }
+ void set_source_stream_encoding(
+ ScriptCompiler::StreamedSource::Encoding source_stream_encoding) {
+ source_stream_encoding_ = source_stream_encoding;
+ }
+
+ v8::Extension* extension() { return extension_; }
+ void set_extension(v8::Extension* extension) { extension_ = extension; }
+
+ ScriptData** cached_data() { return cached_data_; }
+ void set_cached_data(ScriptData** cached_data) { cached_data_ = cached_data; }
+
+ ScriptCompiler::CompileOptions compile_options() { return compile_options_; }
+ void set_compile_options(ScriptCompiler::CompileOptions compile_options) {
+ compile_options_ = compile_options;
+ }
+
+ Scope* script_scope() { return script_scope_; }
+ void set_script_scope(Scope* script_scope) { script_scope_ = script_scope; }
+
+ AstValueFactory* ast_value_factory() { return ast_value_factory_; }
+ void set_ast_value_factory(AstValueFactory* ast_value_factory) {
+ ast_value_factory_ = ast_value_factory;
+ }
+
+ FunctionLiteral* function() { // TODO(titzer): temporary name adapter
+ return literal_;
+ }
+ FunctionLiteral* literal() { return literal_; }
+ void set_literal(FunctionLiteral* literal) { literal_ = literal; }
+
+ Scope* scope() { return scope_; }
+ void set_scope(Scope* scope) { scope_ = scope; }
+
+ UnicodeCache* unicode_cache() { return unicode_cache_; }
+ void set_unicode_cache(UnicodeCache* unicode_cache) {
+ unicode_cache_ = unicode_cache;
+ }
+
+ uintptr_t stack_limit() { return stack_limit_; }
+ void set_stack_limit(uintptr_t stack_limit) { stack_limit_ = stack_limit; }
+
+ uint32_t hash_seed() { return hash_seed_; }
+ void set_hash_seed(uint32_t hash_seed) { hash_seed_ = hash_seed; }
+
+ //--------------------------------------------------------------------------
+ // TODO(titzer): these should not be part of ParseInfo.
+ //--------------------------------------------------------------------------
+ Isolate* isolate() { return isolate_; }
+ Handle<JSFunction> closure() { return closure_; }
+ Handle<SharedFunctionInfo> shared_info() { return shared_; }
+ Handle<Script> script() { return script_; }
+ Handle<Context> context() { return context_; }
+ void clear_script() { script_ = Handle<Script>::null(); }
+ void set_isolate(Isolate* isolate) { isolate_ = isolate; }
+ void set_context(Handle<Context> context) { context_ = context; }
+ void set_script(Handle<Script> script) { script_ = script; }
+ //--------------------------------------------------------------------------
+
+ LanguageMode language_mode() {
+ return construct_language_mode(is_strict_mode(), is_strong_mode());
+ }
+ void set_language_mode(LanguageMode language_mode) {
+ STATIC_ASSERT(LANGUAGE_END == 3);
+ set_strict_mode(language_mode & STRICT_BIT);
+ set_strong_mode(language_mode & STRONG_BIT);
+ }
+
+ void ReopenHandlesInNewHandleScope() {
+ closure_ = Handle<JSFunction>(*closure_);
+ shared_ = Handle<SharedFunctionInfo>(*shared_);
+ script_ = Handle<Script>(*script_);
+ context_ = Handle<Context>(*context_);
+ }
+
+ private:
+ // Various configuration flags for parsing.
+ enum Flag {
+ // ---------- Input flags ---------------------------
+ kToplevel = 1 << 0,
+ kLazy = 1 << 1,
+ kEval = 1 << 2,
+ kGlobal = 1 << 3,
+ kStrictMode = 1 << 4,
+ kStrongMode = 1 << 5,
+ kNative = 1 << 6,
+ kParseRestriction = 1 << 7,
+ kModule = 1 << 8,
+ kAllowLazyParsing = 1 << 9,
+ // ---------- Output flags --------------------------
+ kAstValueFactoryOwned = 1 << 10
+ };
+
+ //------------- Inputs to parsing and scope analysis -----------------------
+ Zone* zone_;
+ unsigned flags_;
+ ScriptCompiler::ExternalSourceStream* source_stream_;
+ ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
+ v8::Extension* extension_;
+ ScriptCompiler::CompileOptions compile_options_;
+ Scope* script_scope_;
+ UnicodeCache* unicode_cache_;
+ uintptr_t stack_limit_;
+ uint32_t hash_seed_;
+
+ // TODO(titzer): Move handles and isolate out of ParseInfo.
+ Isolate* isolate_;
+ Handle<JSFunction> closure_;
+ Handle<SharedFunctionInfo> shared_;
+ Handle<Script> script_;
+ Handle<Context> context_;
+
+ //----------- Inputs+Outputs of parsing and scope analysis -----------------
+ ScriptData** cached_data_; // used if available, populated if requested.
+ AstValueFactory* ast_value_factory_; // used if available, otherwise new.
+
+ //----------- Outputs of parsing and scope analysis ------------------------
+ FunctionLiteral* literal_; // produced by full parser.
+ Scope* scope_; // produced by scope analysis.
+
+ void SetFlag(Flag f) { flags_ |= f; }
+ void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
+ bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
+
+ void set_shared_info(Handle<SharedFunctionInfo> shared) { shared_ = shared; }
+ void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
+};
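// A minimal usage sketch (illustrative only; handle setup depends on the
// embedder):
//
//   Zone zone;
//   ParseInfo info(&zone, script);
//   if (Parser::ParseStatic(&info)) {
//     FunctionLiteral* literal = info.literal();
//     // ... hand the literal to the compiler ...
//   }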
class FunctionEntry BASE_EMBEDDED {
public:
@@ -492,20 +675,16 @@ class ParserTraits {
const AstRawString* arg, int pos);
// Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- const char* message,
+ void ReportMessageAt(Scanner::Location source_location, const char* message,
const char* arg = NULL,
- bool is_reference_error = false);
- void ReportMessage(const char* message,
- const char* arg = NULL,
- bool is_reference_error = false);
- void ReportMessage(const char* message,
- const AstRawString* arg,
- bool is_reference_error = false);
- void ReportMessageAt(Scanner::Location source_location,
- const char* message,
+ ParseErrorType error_type = kSyntaxError);
+ void ReportMessage(const char* message, const char* arg = NULL,
+ ParseErrorType error_type = kSyntaxError);
+ void ReportMessage(const char* message, const AstRawString* arg,
+ ParseErrorType error_type = kSyntaxError);
+ void ReportMessageAt(Scanner::Location source_location, const char* message,
const AstRawString* arg,
- bool is_reference_error = false);
+ ParseErrorType error_type = kSyntaxError);
// "null" return type creators.
static const AstRawString* EmptyIdentifier() {
@@ -545,7 +724,8 @@ class ParserTraits {
int end_pos);
Literal* ExpressionFromLiteral(Token::Value token, int pos, Scanner* scanner,
AstNodeFactory* factory);
- Expression* ExpressionFromIdentifier(const AstRawString* name, int pos,
+ Expression* ExpressionFromIdentifier(const AstRawString* name,
+ int start_position, int end_position,
Scope* scope, AstNodeFactory* factory);
Expression* ExpressionFromString(int pos, Scanner* scanner,
AstNodeFactory* factory);
@@ -638,8 +818,7 @@ class ParserTraits {
class Parser : public ParserBase<ParserTraits> {
public:
- Parser(CompilationInfo* info, uintptr_t stack_limit, uint32_t hash_seed,
- UnicodeCache* unicode_cache);
+ explicit Parser(ParseInfo* info);
~Parser() {
delete reusable_preparser_;
reusable_preparser_ = NULL;
@@ -650,14 +829,14 @@ class Parser : public ParserBase<ParserTraits> {
// Parses the source code represented by the compilation info and sets its
// function literal. Returns false (and deallocates any allocated AST
// nodes) if parsing failed.
- static bool ParseStatic(CompilationInfo* info, bool allow_lazy = false);
- bool Parse(CompilationInfo* info);
- void ParseOnBackground(CompilationInfo* info);
+ static bool ParseStatic(ParseInfo* info);
+ bool Parse(ParseInfo* info);
+ void ParseOnBackground(ParseInfo* info);
// Handle errors detected during parsing, move statistics to Isolate,
// internalize strings (move them to the heap).
- void Internalize(CompilationInfo* info);
- void HandleSourceURLComments(CompilationInfo* info);
+ void Internalize(Isolate* isolate, Handle<Script> script, bool error);
+ void HandleSourceURLComments(Isolate* isolate, Handle<Script> script);
private:
friend class ParserTraits;
@@ -672,17 +851,17 @@ class Parser : public ParserBase<ParserTraits> {
static const int kMaxNumFunctionLocals = 4194303; // 2^22-1
// Returns NULL if parsing failed.
- FunctionLiteral* ParseProgram(CompilationInfo* info);
+ FunctionLiteral* ParseProgram(Isolate* isolate, ParseInfo* info);
- FunctionLiteral* ParseLazy(CompilationInfo* info);
- FunctionLiteral* ParseLazy(CompilationInfo* info,
+ FunctionLiteral* ParseLazy(Isolate* isolate, ParseInfo* info);
+ FunctionLiteral* ParseLazy(Isolate* isolate, ParseInfo* info,
Utf16CharacterStream* source);
// Called by ParseProgram after setting up the scanner.
- FunctionLiteral* DoParseProgram(CompilationInfo* info, Scope** scope,
+ FunctionLiteral* DoParseProgram(ParseInfo* info, Scope** scope,
Scope** ad_hoc_eval_scope);
- void SetCachedData(CompilationInfo* info);
+ void SetCachedData(ParseInfo* info);
bool inside_with() const { return scope_->inside_with(); }
ScriptCompiler::CompileOptions compile_options() const {
@@ -707,15 +886,17 @@ class Parser : public ParserBase<ParserTraits> {
void* ParseStatementList(ZoneList<Statement*>* body, int end_token,
bool is_eval, Scope** ad_hoc_eval_scope, bool* ok);
Statement* ParseStatementListItem(bool* ok);
- Statement* ParseModule(bool* ok);
+ void* ParseModuleItemList(ZoneList<Statement*>* body, bool* ok);
Statement* ParseModuleItem(bool* ok);
- Literal* ParseModuleSpecifier(bool* ok);
+ const AstRawString* ParseModuleSpecifier(bool* ok);
Statement* ParseImportDeclaration(bool* ok);
Statement* ParseExportDeclaration(bool* ok);
Statement* ParseExportDefault(bool* ok);
- void* ParseExportClause(ZoneList<const AstRawString*>* names,
+ void* ParseExportClause(ZoneList<const AstRawString*>* export_names,
+ ZoneList<Scanner::Location>* export_locations,
+ ZoneList<const AstRawString*>* local_names,
Scanner::Location* reserved_loc, bool* ok);
- void* ParseNamedImports(ZoneList<const AstRawString*>* names, bool* ok);
+ ZoneList<ImportDeclaration*>* ParseNamedImports(int pos, bool* ok);
Statement* ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok);
Statement* ParseSubStatement(ZoneList<const AstRawString*>* labels, bool* ok);
Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
@@ -763,8 +944,8 @@ class Parser : public ParserBase<ParserTraits> {
Expression* each,
Expression* subject,
Statement* body);
- Statement* DesugarLetBindingsInForStatement(
- Scope* inner_scope, ZoneList<const AstRawString*>* names,
+ Statement* DesugarLexicalBindingsInForStatement(
+ Scope* inner_scope, bool is_const, ZoneList<const AstRawString*>* names,
ForStatement* loop, Statement* init, Expression* cond, Statement* next,
Statement* body, bool* ok);
@@ -799,7 +980,7 @@ class Parser : public ParserBase<ParserTraits> {
// Parser support
VariableProxy* NewUnresolved(const AstRawString* name, VariableMode mode);
- void Declare(Declaration* declaration, bool resolve, bool* ok);
+ Variable* Declare(Declaration* declaration, bool resolve, bool* ok);
bool TargetStackContainsLabel(const AstRawString* label);
BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
@@ -845,13 +1026,7 @@ class Parser : public ParserBase<ParserTraits> {
bool parsing_lazy_arrow_parameters_; // for lazily parsed arrow functions.
- // Pending errors.
- bool has_pending_error_;
- Scanner::Location pending_error_location_;
- const char* pending_error_message_;
- const AstRawString* pending_error_arg_;
- const char* pending_error_char_arg_;
- bool pending_error_is_reference_error_;
+ PendingCompilationErrorHandler pending_error_handler_;
// Other information which will be stored in Parser and moved to Isolate after
// parsing.
diff --git a/deps/v8/src/pending-compilation-error-handler.cc b/deps/v8/src/pending-compilation-error-handler.cc
new file mode 100644
index 0000000000..f0449d82a9
--- /dev/null
+++ b/deps/v8/src/pending-compilation-error-handler.cc
@@ -0,0 +1,64 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/pending-compilation-error-handler.h"
+
+#include "src/debug.h"
+#include "src/handles.h"
+#include "src/isolate.h"
+#include "src/messages.h"
+
+namespace v8 {
+namespace internal {
+
+void PendingCompilationErrorHandler::ThrowPendingError(Isolate* isolate,
+ Handle<Script> script) {
+ if (!has_pending_error_) return;
+ MessageLocation location(script, start_position_, end_position_);
+ Factory* factory = isolate->factory();
+ bool has_arg = arg_ != NULL || char_arg_ != NULL || !handle_arg_.is_null();
+ Handle<FixedArray> elements = factory->NewFixedArray(has_arg ? 1 : 0);
+ if (arg_ != NULL) {
+ Handle<String> arg_string = arg_->string();
+ elements->set(0, *arg_string);
+ } else if (char_arg_ != NULL) {
+ Handle<String> arg_string =
+ factory->NewStringFromUtf8(CStrVector(char_arg_)).ToHandleChecked();
+ elements->set(0, *arg_string);
+ } else if (!handle_arg_.is_null()) {
+ elements->set(0, *handle_arg_);
+ }
+ isolate->debug()->OnCompileError(script);
+
+ Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
+ Handle<Object> error;
+
+ switch (error_type_) {
+ case kReferenceError:
+ error = factory->NewReferenceError(message_, array);
+ break;
+ case kSyntaxError:
+ error = factory->NewSyntaxError(message_, array);
+ break;
+ }
+
+ Handle<JSObject> jserror = Handle<JSObject>::cast(error);
+
+ Handle<Name> key_start_pos = factory->error_start_pos_symbol();
+ JSObject::SetProperty(jserror, key_start_pos,
+ handle(Smi::FromInt(location.start_pos()), isolate),
+ SLOPPY).Check();
+
+ Handle<Name> key_end_pos = factory->error_end_pos_symbol();
+ JSObject::SetProperty(jserror, key_end_pos,
+ handle(Smi::FromInt(location.end_pos()), isolate),
+ SLOPPY).Check();
+
+ Handle<Name> key_script = factory->error_script_symbol();
+ JSObject::SetProperty(jserror, key_script, script, SLOPPY).Check();
+
+ isolate->Throw(*error, &location);
+}
+}
+} // namespace v8::internal
diff --git a/deps/v8/src/pending-compilation-error-handler.h b/deps/v8/src/pending-compilation-error-handler.h
new file mode 100644
index 0000000000..c75f23d039
--- /dev/null
+++ b/deps/v8/src/pending-compilation-error-handler.h
@@ -0,0 +1,91 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PENDING_COMPILATION_ERROR_HANDLER_H_
+#define V8_PENDING_COMPILATION_ERROR_HANDLER_H_
+
+#include "src/base/macros.h"
+#include "src/globals.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+class AstRawString;
+class Isolate;
+class Script;
+
+// Helper class for handling pending compilation errors consistently in various
+// compilation phases.
+class PendingCompilationErrorHandler {
+ public:
+ PendingCompilationErrorHandler()
+ : has_pending_error_(false),
+ start_position_(-1),
+ end_position_(-1),
+ message_(nullptr),
+ arg_(nullptr),
+ char_arg_(nullptr),
+ error_type_(kSyntaxError) {}
+
+ void ReportMessageAt(int start_position, int end_position,
+ const char* message, const char* arg = nullptr,
+ ParseErrorType error_type = kSyntaxError) {
+ if (has_pending_error_) return;
+ has_pending_error_ = true;
+ start_position_ = start_position;
+ end_position_ = end_position;
+ message_ = message;
+ char_arg_ = arg;
+ arg_ = nullptr;
+ error_type_ = error_type;
+ }
+
+ void ReportMessageAt(int start_position, int end_position,
+ const char* message, const AstRawString* arg,
+ ParseErrorType error_type = kSyntaxError) {
+ if (has_pending_error_) return;
+ has_pending_error_ = true;
+ start_position_ = start_position;
+ end_position_ = end_position;
+ message_ = message;
+ char_arg_ = nullptr;
+ arg_ = arg;
+ error_type_ = error_type;
+ }
+
+ void ReportMessageAt(int start_position, int end_position,
+ const char* message, Handle<String> arg,
+ ParseErrorType error_type = kSyntaxError) {
+ if (has_pending_error_) return;
+ has_pending_error_ = true;
+ start_position_ = start_position;
+ end_position_ = end_position;
+ message_ = message;
+ char_arg_ = nullptr;
+ arg_ = nullptr;
+ handle_arg_ = arg;
+ error_type_ = error_type;
+ }
+
+ bool has_pending_error() const { return has_pending_error_; }
+
+ void ThrowPendingError(Isolate* isolate, Handle<Script> script);
+
+ private:
+ bool has_pending_error_;
+ int start_position_;
+ int end_position_;
+ const char* message_;
+ const AstRawString* arg_;
+ const char* char_arg_;
+ Handle<String> handle_arg_;
+ ParseErrorType error_type_;
+
+ DISALLOW_COPY_AND_ASSIGN(PendingCompilationErrorHandler);
+};
+
+} // namespace internal
+} // namespace v8
+#endif // V8_PENDING_COMPILATION_ERROR_HANDLER_H_
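// Typical flow (illustrative): a parser records at most the first error while
// it runs (possibly off the main thread) and throws it later from the main
// thread:
//
//   handler.ReportMessageAt(beg_pos, end_pos, "var_redeclaration", name);
//   ...
//   if (handler.has_pending_error())
//     handler.ThrowPendingError(isolate, script);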
diff --git a/deps/v8/src/ppc/OWNERS b/deps/v8/src/ppc/OWNERS
new file mode 100644
index 0000000000..beecb3d0b1
--- /dev/null
+++ b/deps/v8/src/ppc/OWNERS
@@ -0,0 +1,3 @@
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 6779ee3d88..d95c7ec596 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -51,14 +51,36 @@ bool CpuFeatures::SupportsCrankshaft() { return true; }
void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
- if (RelocInfo::IsInternalReference(rmode_)) {
- // absolute code pointer inside code object moves with the code object.
- Assembler::RelocateInternalReference(pc_, delta, 0, icache_flush_mode);
+ // absolute code pointer inside code object moves with the code object.
+ if (IsInternalReference(rmode_)) {
+ // Jump table entry
+ Address target = Memory::Address_at(pc_);
+ Memory::Address_at(pc_) = target + delta;
+ } else {
+ // mov sequence
+ DCHECK(IsInternalReferenceEncoded(rmode_));
+ Address target = Assembler::target_address_at(pc_, host_);
+ Assembler::set_target_address_at(pc_, host_, target + delta,
+ icache_flush_mode);
+ }
+}
+
+
+Address RelocInfo::target_internal_reference() {
+ if (IsInternalReference(rmode_)) {
+ // Jump table entry
+ return Memory::Address_at(pc_);
+ } else {
+ // mov sequence
+ DCHECK(IsInternalReferenceEncoded(rmode_));
+ return Assembler::target_address_at(pc_, host_);
}
-#endif
- // We do not use pc relative addressing on PPC, so there is
- // nothing else to do.
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ return reinterpret_cast<Address>(pc_);
}
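// Note: an internal reference on PPC is either a raw address stored in the
// instruction stream (jump table entry, INTERNAL_REFERENCE) or one
// materialized by a lis/ori mov sequence (INTERNAL_REFERENCE_ENCODED); the
// accessors above dispatch on rmode_ accordingly.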
@@ -72,14 +94,6 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
-#if V8_OOL_CONSTANT_POOL
- if (Assembler::IsConstantPoolLoadStart(pc_)) {
- // We return the PC for ool constant pool since this function is used by the
-    // serializer and expects the address to reside within the code object.
- return reinterpret_cast<Address>(pc_);
- }
-#endif
-
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
@@ -94,13 +108,8 @@ Address RelocInfo::target_address_address() {
Address RelocInfo::constant_pool_entry_address() {
-#if V8_OOL_CONSTANT_POOL
- return Assembler::target_constant_pool_address_at(pc_,
- host_->constant_pool());
-#else
UNREACHABLE();
return NULL;
-#endif
}
@@ -134,22 +143,12 @@ Address Assembler::target_address_from_return_address(Address pc) {
// mtlr ip
// blrl
// @ return address
-#if V8_OOL_CONSTANT_POOL
- if (IsConstantPoolLoadEnd(pc - 3 * kInstrSize)) {
- return pc - (kMovInstructionsConstantPool + 2) * kInstrSize;
- }
-#endif
- return pc - (kMovInstructionsNoConstantPool + 2) * kInstrSize;
+ return pc - (kMovInstructions + 2) * kInstrSize;
}
Address Assembler::return_address_from_call_start(Address pc) {
-#if V8_OOL_CONSTANT_POOL
- Address load_address = pc + (kMovInstructionsConstantPool - 1) * kInstrSize;
- if (IsConstantPoolLoadEnd(load_address))
- return pc + (kMovInstructionsConstantPool + 2) * kInstrSize;
-#endif
- return pc + (kMovInstructionsNoConstantPool + 2) * kInstrSize;
+ return pc + (kMovInstructions + 2) * kInstrSize;
}
@@ -180,7 +179,7 @@ void RelocInfo::set_target_object(Object* target,
}
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
}
@@ -227,13 +226,8 @@ void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
}
-#if V8_OOL_CONSTANT_POOL
-static const int kNoCodeAgeInstructions = 7;
-#else
static const int kNoCodeAgeInstructions = 6;
-#endif
-static const int kCodeAgingInstructions =
- Assembler::kMovInstructionsNoConstantPool + 3;
+static const int kCodeAgingInstructions = Assembler::kMovInstructions + 3;
static const int kNoCodeAgeSequenceInstructions =
((kNoCodeAgeInstructions >= kCodeAgingInstructions)
? kNoCodeAgeInstructions
@@ -273,8 +267,8 @@ Address RelocInfo::call_address() {
DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
// The pc_ offset of 0 assumes patched return sequence per
- // BreakLocationIterator::SetDebugBreakAtReturn(), or debug break
- // slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ // BreakLocation::SetDebugBreakAtReturn(), or debug break
+ // slot per BreakLocation::SetDebugBreakAtSlot().
return Assembler::target_address_at(pc_, host_);
}
@@ -308,15 +302,25 @@ Object** RelocInfo::call_object_address() {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
- IsRuntimeEntry(rmode_) || IsExternalReference(rmode_));
- Assembler::set_target_address_at(pc_, host_, NULL);
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
+ if (IsInternalReference(rmode_)) {
+ // Jump table entry
+ Memory::Address_at(pc_) = NULL;
+ } else if (IsInternalReferenceEncoded(rmode_)) {
+ // mov sequence
+ // Currently used only by deserializer, no need to flush.
+ Assembler::set_target_address_at(pc_, host_, NULL, SKIP_ICACHE_FLUSH);
+ } else {
+ Assembler::set_target_address_at(pc_, host_, NULL);
+ }
}
bool RelocInfo::IsPatchedReturnSequence() {
//
// The patched return sequence is defined by
- // BreakLocationIterator::SetDebugBreakAtReturn()
+ // BreakLocation::SetDebugBreakAtReturn()
// FIXED_SEQUENCE
Instr instr0 = Assembler::instr_at(pc_);
@@ -356,6 +360,9 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
+ mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) ||
@@ -380,6 +387,9 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE ||
+ mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
+ StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
@@ -459,59 +469,10 @@ Address Assembler::target_address_at(Address pc,
(instr2 & kImm16Mask));
#endif
}
-#if V8_OOL_CONSTANT_POOL
- return Memory::Address_at(target_constant_pool_address_at(pc, constant_pool));
-#else
- DCHECK(false);
- return (Address)0;
-#endif
-}
-
-
-#if V8_OOL_CONSTANT_POOL
-bool Assembler::IsConstantPoolLoadStart(Address pc) {
-#if V8_TARGET_ARCH_PPC64
- if (!IsLi(instr_at(pc))) return false;
- pc += kInstrSize;
-#endif
- return GetRA(instr_at(pc)).is(kConstantPoolRegister);
-}
-
-
-bool Assembler::IsConstantPoolLoadEnd(Address pc) {
-#if V8_TARGET_ARCH_PPC64
- pc -= kInstrSize;
-#endif
- return IsConstantPoolLoadStart(pc);
-}
-
-
-int Assembler::GetConstantPoolOffset(Address pc) {
- DCHECK(IsConstantPoolLoadStart(pc));
- Instr instr = instr_at(pc);
- int offset = SIGN_EXT_IMM16((instr & kImm16Mask));
- return offset;
-}
-
-
-void Assembler::SetConstantPoolOffset(Address pc, int offset) {
- DCHECK(IsConstantPoolLoadStart(pc));
- DCHECK(is_int16(offset));
- Instr instr = instr_at(pc);
- instr &= ~kImm16Mask;
- instr |= (offset & kImm16Mask);
- instr_at_put(pc, instr);
-}
-
-Address Assembler::target_constant_pool_address_at(
- Address pc, ConstantPoolArray* constant_pool) {
- Address addr = reinterpret_cast<Address>(constant_pool);
- DCHECK(addr);
- addr += GetConstantPoolOffset(pc);
- return addr;
+ UNREACHABLE();
+ return NULL;
}
-#endif
// This sets the branch destination (which gets loaded at the call address).
@@ -523,6 +484,18 @@ void Assembler::deserialization_set_special_target_at(
set_target_address_at(instruction_payload, code, target);
}
+
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ if (RelocInfo::IsInternalReferenceEncoded(mode)) {
+ Code* code = NULL;
+ set_target_address_at(pc, code, target, SKIP_ICACHE_FLUSH);
+ } else {
+ Memory::Address_at(pc) = target;
+ }
+}
+
+
// This code assumes the FIXED_SEQUENCE of lis/ori
void Assembler::set_target_address_at(Address pc,
ConstantPoolArray* constant_pool,
@@ -578,14 +551,9 @@ void Assembler::set_target_address_at(Address pc,
CpuFeatures::FlushICache(p, 2 * kInstrSize);
}
#endif
- } else {
-#if V8_OOL_CONSTANT_POOL
- Memory::Address_at(target_constant_pool_address_at(pc, constant_pool)) =
- target;
-#else
- UNREACHABLE();
-#endif
+ return;
}
+ UNREACHABLE();
}
}
} // namespace v8::internal
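A note on the two internal-reference flavors the file above now distinguishes: INTERNAL_REFERENCE is a raw absolute pointer stored in a jump table slot, while INTERNAL_REFERENCE_ENCODED is an address materialized by a lis/ori mov sequence. A minimal sketch of the dispatch, using only names that appear in this patch (not itself part of the commit):

    void PatchInternalReference(Address pc, Code* host, Address target,
                                RelocInfo::Mode mode) {
      if (RelocInfo::IsInternalReference(mode)) {
        // Jump table entry: the slot holds the pointer directly.
        Memory::Address_at(pc) = target;
      } else {
        DCHECK(RelocInfo::IsInternalReferenceEncoded(mode));
        // mov sequence: rewrite the lis/ori immediates in place.
        Assembler::set_target_address_at(pc, host, target, SKIP_ICACHE_FLUSH);
      }
    }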
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index 8bb45e36cc..7778ab1ce1 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -42,7 +42,6 @@
#include "src/base/cpu.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -142,45 +141,21 @@ const char* DoubleRegister::AllocationIndexToString(int index) {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
+ 1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially
// coded. Being specially coded on PPC means that it is a lis/ori
- // instruction sequence or is an out of line constant pool entry,
- // and these are always the case inside code objects.
+ // instruction sequence, and this is always the case inside code
+ // objects.
return true;
}
bool RelocInfo::IsInConstantPool() {
-#if V8_OOL_CONSTANT_POOL
- return Assembler::IsConstantPoolLoadStart(pc_);
-#else
return false;
-#endif
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- // Patch the code at the current address with a call to the target.
- UNIMPLEMENTED();
}
@@ -226,9 +201,6 @@ MemOperand::MemOperand(Register ra, Register rb) {
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
-#if V8_OOL_CONSTANT_POOL
- constant_pool_builder_(),
-#endif
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -244,11 +216,13 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
ClearRecordedAstId();
+ relocations_.reserve(128);
}
void Assembler::GetCode(CodeDesc* desc) {
- reloc_info_writer.Finish();
+ EmitRelocations();
+
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -401,32 +375,43 @@ int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
const int kEndOfChain = -4;
+// Dummy opcodes for unbound label mov instructions or jump table entries.
+enum {
+ kUnboundMovLabelOffsetOpcode = 0 << 26,
+ kUnboundAddLabelOffsetOpcode = 1 << 26,
+ kUnboundMovLabelAddrOpcode = 2 << 26,
+ kUnboundJumpTableEntryOpcode = 3 << 26
+};
+
+
int Assembler::target_at(int pos) {
Instr instr = instr_at(pos);
// check which type of branch this is 16 or 26 bit offset
int opcode = instr & kOpcodeMask;
- if (BX == opcode) {
- int imm26 = ((instr & kImm26Mask) << 6) >> 6;
- imm26 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
- if (imm26 == 0) return kEndOfChain;
- return pos + imm26;
- } else if (BCX == opcode) {
- int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
- imm16 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
- if (imm16 == 0) return kEndOfChain;
- return pos + imm16;
- } else if ((instr & ~kImm26Mask) == 0) {
- // Emitted link to a label, not part of a branch (regexp PushBacktrack).
- if (instr == 0) {
- return kEndOfChain;
- } else {
- int32_t imm26 = SIGN_EXT_IMM26(instr);
- return (imm26 + pos);
- }
+ int link;
+ switch (opcode) {
+ case BX:
+ link = SIGN_EXT_IMM26(instr & kImm26Mask);
+ link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
+ break;
+ case BCX:
+ link = SIGN_EXT_IMM16((instr & kImm16Mask));
+ link &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
+ break;
+ case kUnboundMovLabelOffsetOpcode:
+ case kUnboundAddLabelOffsetOpcode:
+ case kUnboundMovLabelAddrOpcode:
+ case kUnboundJumpTableEntryOpcode:
+ link = SIGN_EXT_IMM26(instr & kImm26Mask);
+ link <<= 2;
+ break;
+ default:
+ DCHECK(false);
+ return -1;
}
- DCHECK(false);
- return -1;
+ if (link == 0) return kEndOfChain;
+ return pos + link;
}
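The four kUnbound* cases above share one encoding: the distance to the previous link in a label's chain is instruction-aligned, so it is stored as a signed 26-bit word offset inside a dummy opcode (the emitters appear later in this file). Illustrative helpers, not part of the commit:

    // Pack an instruction-aligned byte offset into a dummy-opcode word.
    Instr EncodeUnboundLink(Instr dummy_opcode, int byte_offset) {
      DCHECK_EQ(0, byte_offset & 3);  // instruction aligned
      int link = byte_offset >> 2;    // stored as a word offset
      DCHECK(is_int26(link));
      return dummy_opcode | (link & kImm26Mask);
    }

    // Recover the byte offset; a zero link marks the end of a chain.
    int DecodeUnboundLink(Instr instr) {
      return SIGN_EXT_IMM26(instr & kImm26Mask) << 2;
    }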
@@ -434,51 +419,74 @@ void Assembler::target_at_put(int pos, int target_pos) {
Instr instr = instr_at(pos);
int opcode = instr & kOpcodeMask;
- // check which type of branch this is 16 or 26 bit offset
- if (BX == opcode) {
- int imm26 = target_pos - pos;
- DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
- if (imm26 == kInstrSize && !(instr & kLKMask)) {
- // Branch to next instr without link.
- instr = ORI; // nop: ori, 0,0,0
- } else {
- instr &= ((~kImm26Mask) | kAAMask | kLKMask);
- instr |= (imm26 & kImm26Mask);
+ switch (opcode) {
+ case BX: {
+ int imm26 = target_pos - pos;
+ DCHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
+ if (imm26 == kInstrSize && !(instr & kLKMask)) {
+ // Branch to next instr without link.
+ instr = ORI; // nop: ori 0,0,0
+ } else {
+ instr &= ((~kImm26Mask) | kAAMask | kLKMask);
+ instr |= (imm26 & kImm26Mask);
+ }
+ instr_at_put(pos, instr);
+ break;
}
- instr_at_put(pos, instr);
- return;
- } else if (BCX == opcode) {
- int imm16 = target_pos - pos;
- DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
- if (imm16 == kInstrSize && !(instr & kLKMask)) {
- // Branch to next instr without link.
- instr = ORI; // nop: ori, 0,0,0
- } else {
- instr &= ((~kImm16Mask) | kAAMask | kLKMask);
- instr |= (imm16 & kImm16Mask);
+ case BCX: {
+ int imm16 = target_pos - pos;
+ DCHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
+ if (imm16 == kInstrSize && !(instr & kLKMask)) {
+ // Branch to next instr without link.
+ instr = ORI; // nop: ori 0,0,0
+ } else {
+ instr &= ((~kImm16Mask) | kAAMask | kLKMask);
+ instr |= (imm16 & kImm16Mask);
+ }
+ instr_at_put(pos, instr);
+ break;
}
- instr_at_put(pos, instr);
- return;
- } else if ((instr & ~kImm26Mask) == 0) {
- DCHECK(target_pos == kEndOfChain || target_pos >= 0);
- // Emitted link to a label, not part of a branch (regexp PushBacktrack).
- // Load the position of the label relative to the generated code object
- // pointer in a register.
-
- Register dst = r3; // we assume r3 for now
- DCHECK(IsNop(instr_at(pos + kInstrSize)));
- uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
- CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
- CodePatcher::DONT_FLUSH);
- int target_hi = static_cast<int>(target) >> 16;
- int target_lo = static_cast<int>(target) & 0XFFFF;
-
- patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
- patcher.masm()->ori(dst, dst, Operand(target_lo));
- return;
+ case kUnboundMovLabelOffsetOpcode: {
+ // Load the position of the label relative to the generated code object
+ // pointer in a register.
+ Register dst = Register::from_code(instr_at(pos + kInstrSize));
+ int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->bitwise_mov32(dst, offset);
+ break;
+ }
+ case kUnboundAddLabelOffsetOpcode: {
+ // dst = base + position + immediate
+ Instr operands = instr_at(pos + kInstrSize);
+ Register dst = Register::from_code((operands >> 21) & 0x1f);
+ Register base = Register::from_code((operands >> 16) & 0x1f);
+ int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+ CodePatcher::DONT_FLUSH);
+ patcher.masm()->bitwise_add32(dst, base, offset);
+ break;
+ }
+ case kUnboundMovLabelAddrOpcode: {
+ // Load the address of the label in a register.
+ Register dst = Register::from_code(instr_at(pos + kInstrSize));
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ kMovInstructions, CodePatcher::DONT_FLUSH);
+ // Keep internal references relative until EmitRelocations.
+ patcher.masm()->bitwise_mov(dst, target_pos);
+ break;
+ }
+ case kUnboundJumpTableEntryOpcode: {
+ CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos),
+ kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
+ // Keep internal references relative until EmitRelocations.
+ patcher.masm()->emit_ptr(target_pos);
+ break;
+ }
+ default:
+ DCHECK(false);
+ break;
}
-
- DCHECK(false);
}
@@ -487,13 +495,16 @@ int Assembler::max_reach_from(int pos) {
int opcode = instr & kOpcodeMask;
// check which type of branch this is 16 or 26 bit offset
- if (BX == opcode) {
- return 26;
- } else if (BCX == opcode) {
- return 16;
- } else if ((instr & ~kImm26Mask) == 0) {
- // Emitted label constant, not part of a branch (regexp PushBacktrack).
- return 26;
+ switch (opcode) {
+ case BX:
+ return 26;
+ case BCX:
+ return 16;
+ case kUnboundMovLabelOffsetOpcode:
+ case kUnboundAddLabelOffsetOpcode:
+ case kUnboundMovLabelAddrOpcode:
+ case kUnboundJumpTableEntryOpcode:
+ return 0; // no limit on reach
}
DCHECK(false);
@@ -514,7 +525,7 @@ void Assembler::bind_to(Label* L, int pos) {
int32_t offset = pos - fixup_pos;
int maxReach = max_reach_from(fixup_pos);
next(L); // call next before overwriting link with target at fixup_pos
- if (is_intn(offset, maxReach) == false) {
+ if (maxReach && is_intn(offset, maxReach) == false) {
if (trampoline_pos == kInvalidSlotPos) {
trampoline_pos = get_trampoline_entry();
CHECK(trampoline_pos != kInvalidSlotPos);
@@ -636,19 +647,19 @@ int32_t Assembler::get_trampoline_entry() {
}
-int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
- int target_pos;
+int Assembler::link(Label* L) {
+ int position;
if (L->is_bound()) {
- target_pos = L->pos();
+ position = L->pos();
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ position = L->pos(); // L's link
} else {
// was: target_pos = kEndOfChain;
- // However, using branch to self to mark the first reference
+ // However, using a link to self to mark the first reference
// should avoid most instances of branch offset overflow. See
// target_at() for where this is converted back to kEndOfChain.
- target_pos = pc_offset();
+ position = pc_offset();
if (!trampoline_emitted_) {
unbound_labels_count_++;
next_buffer_check_ -= kTrampolineSlotsSize;
@@ -657,7 +668,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
L->link_to(pc_offset());
}
- return target_pos - pc_offset();
+ return position;
}
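branch_offset() is now a thin wrapper over this helper (see the header change below). Rough usage, as a sketch:

    Label l;
    int pos = link(&l);              // bound: l.pos(); linked: prior link;
                                     // otherwise pc_offset() (self-marking)
    int offset = pos - pc_offset();  // exactly what branch_offset() returns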
@@ -1478,102 +1489,21 @@ void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
#if ABI_USES_FUNCTION_DESCRIPTORS
+ Label instructions;
DCHECK(pc_offset() == 0);
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
- emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
+ emit_label_addr(&instructions);
emit_ptr(0);
emit_ptr(0);
+ bind(&instructions);
#endif
}
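For context on the function_descriptor() rewrite above: on ABIs that use function descriptors (e.g. AIX), a function symbol points at three pointers rather than at code, and the entry address is now emitted via emit_label_addr(&instructions) so it flows through the normal internal-reference machinery. Schematically (editor's sketch; TOC and static chain stay zero, as the code comment notes):

    // descriptor[0] = code entry   (label bound just past the descriptor)
    // descriptor[1] = TOC pointer  (ignored, set to 0)
    // descriptor[2] = static chain (ignored, set to 0)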
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
-void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
- Address code_start,
- ICacheFlushMode icache_flush_mode) {
- DCHECK(delta || code_start);
-#if ABI_USES_FUNCTION_DESCRIPTORS
- uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
- if (fd[1] == 0 && fd[2] == 0) {
- // Function descriptor
- if (delta) {
- fd[0] += delta;
- } else {
- fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
- }
- return;
- }
-#endif
-#if V8_OOL_CONSTANT_POOL
- // mov for LoadConstantPoolPointerRegister
- ConstantPoolArray* constant_pool = NULL;
- if (delta) {
- code_start = target_address_at(pc, constant_pool) + delta;
- }
- set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
-#endif
-}
-
-
-int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
-#if ABI_USES_FUNCTION_DESCRIPTORS
- uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
- if (fd[1] == 0 && fd[2] == 0) {
- // Function descriptor
- SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
- "]"
- " function descriptor",
- fd[0], fd[1], fd[2]);
- return kPointerSize * 3;
- }
-#endif
- return 0;
-}
-#endif
-
-
-int Assembler::instructions_required_for_mov(const Operand& x) const {
-#if V8_OOL_CONSTANT_POOL || DEBUG
- bool canOptimize =
- !(x.must_output_reloc_info(this) || is_trampoline_pool_blocked());
-#endif
-#if V8_OOL_CONSTANT_POOL
- if (use_constant_pool_for_mov(x, canOptimize)) {
- // Current usage guarantees that all constant pool references can
- // use the same sequence.
- return kMovInstructionsConstantPool;
- }
-#endif
- DCHECK(!canOptimize);
- return kMovInstructionsNoConstantPool;
-}
-
-
-#if V8_OOL_CONSTANT_POOL
-bool Assembler::use_constant_pool_for_mov(const Operand& x,
- bool canOptimize) const {
- if (!is_ool_constant_pool_available() || is_constant_pool_full()) {
- // If there is no constant pool available, we must use a mov
- // immediate sequence.
- return false;
- }
-
- intptr_t value = x.immediate();
- if (canOptimize && is_int16(value)) {
- // Prefer a single-instruction load-immediate.
- return false;
- }
-
- return true;
-}
-
-
void Assembler::EnsureSpaceFor(int space_needed) {
if (buffer_space() <= (kGap + space_needed)) {
- GrowBuffer();
+ GrowBuffer(space_needed);
}
}
-#endif
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
@@ -1595,32 +1525,11 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
intptr_t value = src.immediate();
+ bool relocatable = src.must_output_reloc_info(this);
bool canOptimize;
- RelocInfo rinfo(pc_, src.rmode_, value, NULL);
- if (src.must_output_reloc_info(this)) {
- RecordRelocInfo(rinfo);
- }
-
- canOptimize = !(src.must_output_reloc_info(this) ||
- (is_trampoline_pool_blocked() && !is_int16(value)));
-
-#if V8_OOL_CONSTANT_POOL
- if (use_constant_pool_for_mov(src, canOptimize)) {
- DCHECK(is_ool_constant_pool_available());
- ConstantPoolAddEntry(rinfo);
-#if V8_TARGET_ARCH_PPC64
- BlockTrampolinePoolScope block_trampoline_pool(this);
- // We are forced to use 2 instruction sequence since the constant
- // pool pointer is tagged.
- li(dst, Operand::Zero());
- ldx(dst, MemOperand(kConstantPoolRegister, dst));
-#else
- lwz(dst, MemOperand(kConstantPoolRegister, 0));
-#endif
- return;
- }
-#endif
+ canOptimize =
+ !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));
if (canOptimize) {
if (is_int16(value)) {
@@ -1658,8 +1567,14 @@ void Assembler::mov(Register dst, const Operand& src) {
}
DCHECK(!canOptimize);
+ if (relocatable) {
+ RecordRelocInfo(src.rmode_);
+ }
+ bitwise_mov(dst, value);
+}
+
- {
+void Assembler::bitwise_mov(Register dst, intptr_t value) {
BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
int32_t hi_32 = static_cast<int32_t>(value >> 32);
@@ -1679,37 +1594,138 @@ void Assembler::mov(Register dst, const Operand& src) {
lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
ori(dst, dst, Operand(lo_word));
#endif
+}
+
+
+void Assembler::bitwise_mov32(Register dst, int32_t value) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int hi_word = static_cast<int>(value >> 16);
+ int lo_word = static_cast<int>(value & 0xffff);
+ lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+ ori(dst, dst, Operand(lo_word));
+}
+
+
+void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (is_int16(value)) {
+ addi(dst, src, Operand(value));
+ nop();
+ } else {
+ int hi_word = static_cast<int>(value >> 16);
+ int lo_word = static_cast<int>(value & 0xffff);
+ if (lo_word & 0x8000) hi_word++;
+ addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
+ addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
}
}
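One subtlety in bitwise_add32() above: addic sign-extends its 16-bit immediate, so when the low halfword has bit 15 set the high halfword is pre-incremented to compensate. Worked example (editor's arithmetic):

    // value = 0x00018000: lo_word = 0x8000 has bit 15 set, so hi_word = 1 + 1 = 2
    // addis: dst = src + (2 << 16)          = src + 0x20000
    // addic: dst = dst + SIGN_EXT16(0x8000) = dst - 0x8000   => src + 0x18000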
void Assembler::mov_label_offset(Register dst, Label* label) {
+ int position = link(label);
if (label->is_bound()) {
- int target = label->pos();
- mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ // Load the position of the label relative to the generated code object.
+ mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
} else {
- bool is_linked = label->is_linked();
- // Emit the link to the label in the code stream followed by extra
- // nop instructions.
- DCHECK(dst.is(r3)); // target_at_put assumes r3 for now
- int link = is_linked ? label->pos() - pc_offset() : 0;
- label->link_to(pc_offset());
-
- if (!is_linked && !trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
+ // Encode internal reference to unbound label. We use a dummy opcode
+ // such that it won't collide with any opcode that might appear in the
+ // label's chain. Encode the destination register in the 2nd instruction.
+ int link = position - pc_offset();
+ DCHECK_EQ(0, link & 3);
+ link >>= 2;
+ DCHECK(is_int26(link));
// When the label is bound, these instructions will be patched
// with a 2 instruction mov sequence that will load the
// destination register with the position of the label from the
// beginning of the code.
//
- // When the label gets bound: target_at extracts the link and
- // target_at_put patches the instructions.
+ // target_at extracts the link and target_at_put patches the instructions.
BlockTrampolinePoolScope block_trampoline_pool(this);
- emit(link);
+ emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
+ emit(dst.code());
+ }
+}
+
+
+void Assembler::add_label_offset(Register dst, Register base, Label* label,
+ int delta) {
+ int position = link(label);
+ if (label->is_bound()) {
+ // dst = base + position + delta
+ position += delta;
+ bitwise_add32(dst, base, position);
+ } else {
+ // Encode internal reference to unbound label. We use a dummy opcode
+ // such that it won't collide with any opcode that might appear in the
+ // label's chain. Encode the operands in the 2nd instruction.
+ int link = position - pc_offset();
+ DCHECK_EQ(0, link & 3);
+ link >>= 2;
+ DCHECK(is_int26(link));
+ DCHECK(is_int16(delta));
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
+ emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
+ }
+}
+
+
+void Assembler::mov_label_addr(Register dst, Label* label) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ int position = link(label);
+ if (label->is_bound()) {
+ // Keep internal references relative until EmitRelocations.
+ bitwise_mov(dst, position);
+ } else {
+ // Encode internal reference to unbound label. We use a dummy opcode
+ // such that it won't collide with any opcode that might appear in the
+ // label's chain. Encode the destination register in the 2nd instruction.
+ int link = position - pc_offset();
+ DCHECK_EQ(0, link & 3);
+ link >>= 2;
+ DCHECK(is_int26(link));
+
+ // When the label is bound, these instructions will be patched
+ // with a multi-instruction mov sequence that will load the
+ // destination register with the address of the label.
+ //
+ // target_at extracts the link and target_at_put patches the instructions.
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
+ emit(dst.code());
+ DCHECK(kMovInstructions >= 2);
+ for (int i = 0; i < kMovInstructions - 2; i++) nop();
+ }
+}
+
+
+void Assembler::emit_label_addr(Label* label) {
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ int position = link(label);
+ if (label->is_bound()) {
+ // Keep internal references relative until EmitRelocations.
+ emit_ptr(position);
+ } else {
+ // Encode internal reference to unbound label. We use a dummy opcode
+ // such that it won't collide with any opcode that might appear in the
+ // label's chain.
+ int link = position - pc_offset();
+ DCHECK_EQ(0, link & 3);
+ link >>= 2;
+ DCHECK(is_int26(link));
+
+ // When the label is bound, the instruction(s) will be patched
+ // as a jump table entry containing the label address. target_at extracts
+ // the link and target_at_put patches the instruction(s).
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
+#if V8_TARGET_ARCH_PPC64
nop();
+#endif
}
}
@@ -2172,8 +2188,7 @@ bool Assembler::IsNop(Instr instr, int type) {
}
-// Debugging.
-void Assembler::GrowBuffer() {
+void Assembler::GrowBuffer(int needed) {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
@@ -2185,6 +2200,10 @@ void Assembler::GrowBuffer() {
} else {
desc.buffer_size = buffer_size_ + 1 * MB;
}
+ int space = buffer_space() + (desc.buffer_size - buffer_size_);
+ if (space < needed) {
+ desc.buffer_size += needed - space;
+ }
CHECK_GT(desc.buffer_size, 0); // no overflow
// Set up new buffer.
@@ -2209,22 +2228,9 @@ void Assembler::GrowBuffer() {
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
-// None of our relocation types are pc relative pointing outside the code
-// buffer nor pc absolute pointing inside the code buffer, so there is no need
-// to relocate any emitted relocation entries.
-
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
- // Relocate runtime entries.
- for (RelocIterator it(desc); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
- }
- }
-#if V8_OOL_CONSTANT_POOL
- constant_pool_builder_.Relocate(pc_delta);
-#endif
-#endif
+ // Nothing else to do here since we keep all internal references and
+ // deferred relocation entries relative to the buffer (until
+ // EmitRelocations).
}
@@ -2242,20 +2248,27 @@ void Assembler::dd(uint32_t data) {
}
-void Assembler::emit_ptr(uintptr_t data) {
+void Assembler::emit_ptr(intptr_t data) {
CheckBuffer();
- *reinterpret_cast<uintptr_t*>(pc_) = data;
- pc_ += sizeof(uintptr_t);
+ *reinterpret_cast<intptr_t*>(pc_) = data;
+ pc_ += sizeof(intptr_t);
+}
+
+
+void Assembler::emit_double(double value) {
+ CheckBuffer();
+ *reinterpret_cast<double*>(pc_) = value;
+ pc_ += sizeof(double);
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data, NULL);
+ DeferredRelocInfo rinfo(pc_offset(), rmode, data);
RecordRelocInfo(rinfo);
}
-void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
+void Assembler::RecordRelocInfo(const DeferredRelocInfo& rinfo) {
if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
@@ -2271,19 +2284,46 @@ void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
return;
}
}
- DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(),
- RecordedAstId().ToInt(), NULL);
+ DeferredRelocInfo reloc_info_with_ast_id(rinfo.position(), rinfo.rmode(),
+ RecordedAstId().ToInt());
ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
+ relocations_.push_back(reloc_info_with_ast_id);
} else {
- reloc_info_writer.Write(&rinfo);
+ relocations_.push_back(rinfo);
}
}
}
+void Assembler::EmitRelocations() {
+ EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
+
+ for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
+ it != relocations_.end(); it++) {
+ RelocInfo::Mode rmode = it->rmode();
+ Address pc = buffer_ + it->position();
+ Code* code = NULL;
+ RelocInfo rinfo(pc, rmode, it->data(), code);
+
+ // Fix up internal references now that they are guaranteed to be bound.
+ if (RelocInfo::IsInternalReference(rmode)) {
+ // Jump table entry
+ intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
+ Memory::Address_at(pc) = buffer_ + pos;
+ } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
+ // mov sequence
+ intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
+ set_target_address_at(pc, code, buffer_ + pos, SKIP_ICACHE_FLUSH);
+ }
+
+ reloc_info_writer.Write(&rinfo);
+ }
+
+ reloc_info_writer.Finish();
+}
+
+
void Assembler::BlockTrampolinePoolFor(int instructions) {
BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
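Taken together with the GetCode() change earlier, this hunk moves relocation from write-as-you-go to a deferred model: entries are buffered as buffer-relative positions and only converted to absolute pcs once the buffer can no longer move. The lifecycle, in outline (sketch, not part of the commit):

    // During assembly:
    //   RecordRelocInfo(rmode, data)
    //     -> relocations_.push_back(DeferredRelocInfo(pc_offset(), rmode, data))
    // GrowBuffer() may move the buffer; stored positions remain valid.
    // In GetCode():
    //   EmitRelocations()
    //     -> pc = buffer_ + position; fix up internal references;
    //        reloc_info_writer.Write(&rinfo); finally reloc_info_writer.Finish()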
@@ -2339,193 +2379,14 @@ void Assembler::CheckTrampolinePool() {
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
-#if V8_OOL_CONSTANT_POOL
- return constant_pool_builder_.New(isolate);
-#else
- // No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
return isolate->factory()->empty_constant_pool_array();
-#endif
}
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
-#if V8_OOL_CONSTANT_POOL
- constant_pool_builder_.Populate(this, constant_pool);
-#else
- // No out-of-line constant pool support.
DCHECK(!FLAG_enable_ool_constant_pool);
-#endif
}
-
-
-#if V8_OOL_CONSTANT_POOL
-ConstantPoolBuilder::ConstantPoolBuilder()
- : size_(0),
- entries_(),
- current_section_(ConstantPoolArray::SMALL_SECTION) {}
-
-
-bool ConstantPoolBuilder::IsEmpty() { return entries_.size() == 0; }
-
-
-ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
- RelocInfo::Mode rmode) {
-#if V8_TARGET_ARCH_PPC64
- // We don't support 32-bit entries at this time.
- if (!RelocInfo::IsGCRelocMode(rmode)) {
- return ConstantPoolArray::INT64;
-#else
- if (rmode == RelocInfo::NONE64) {
- return ConstantPoolArray::INT64;
- } else if (!RelocInfo::IsGCRelocMode(rmode)) {
- return ConstantPoolArray::INT32;
-#endif
- } else if (RelocInfo::IsCodeTarget(rmode)) {
- return ConstantPoolArray::CODE_PTR;
- } else {
- DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
- return ConstantPoolArray::HEAP_PTR;
- }
-}
-
-
-ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
- Assembler* assm, const RelocInfo& rinfo) {
- RelocInfo::Mode rmode = rinfo.rmode();
- DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
- rmode != RelocInfo::STATEMENT_POSITION &&
- rmode != RelocInfo::CONST_POOL);
-
- // Try to merge entries which won't be patched.
- int merged_index = -1;
- ConstantPoolArray::LayoutSection entry_section = current_section_;
- if (RelocInfo::IsNone(rmode) ||
- (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
- size_t i;
- std::vector<ConstantPoolEntry>::const_iterator it;
- for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
- if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
- // Merge with found entry.
- merged_index = i;
- entry_section = entries_[i].section_;
- break;
- }
- }
- }
- DCHECK(entry_section <= current_section_);
- entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
-
- if (merged_index == -1) {
- // Not merged, so update the appropriate count.
- number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
- }
-
- // Check if we still have room for another entry in the small section
- // given the limitations of the header's layout fields.
- if (current_section_ == ConstantPoolArray::SMALL_SECTION) {
- size_ = ConstantPoolArray::SizeFor(*small_entries());
- if (!is_uint12(size_)) {
- current_section_ = ConstantPoolArray::EXTENDED_SECTION;
- }
- } else {
- size_ = ConstantPoolArray::SizeForExtended(*small_entries(),
- *extended_entries());
- }
-
- return entry_section;
-}
-
-
-void ConstantPoolBuilder::Relocate(intptr_t pc_delta) {
- for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
- entry != entries_.end(); entry++) {
- DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
- entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
- }
-}
-
-
-Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
- if (IsEmpty()) {
- return isolate->factory()->empty_constant_pool_array();
- } else if (extended_entries()->is_empty()) {
- return isolate->factory()->NewConstantPoolArray(*small_entries());
- } else {
- DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
- return isolate->factory()->NewExtendedConstantPoolArray(
- *small_entries(), *extended_entries());
- }
-}
-
-
-void ConstantPoolBuilder::Populate(Assembler* assm,
- ConstantPoolArray* constant_pool) {
- DCHECK_EQ(extended_entries()->is_empty(),
- !constant_pool->is_extended_layout());
- DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
- constant_pool, ConstantPoolArray::SMALL_SECTION)));
- if (constant_pool->is_extended_layout()) {
- DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
- constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
- }
-
- // Set up initial offsets.
- int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
- [ConstantPoolArray::NUMBER_OF_TYPES];
- for (int section = 0; section <= constant_pool->final_section(); section++) {
- int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
- ? small_entries()->total_count()
- : 0;
- for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
- ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
- if (number_of_entries_[section].count_of(type) != 0) {
- offsets[section][type] = constant_pool->OffsetOfElementAt(
- number_of_entries_[section].base_of(type) + section_start);
- }
- }
- }
-
- for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
- entry != entries_.end(); entry++) {
- RelocInfo rinfo = entry->rinfo_;
- RelocInfo::Mode rmode = entry->rinfo_.rmode();
- ConstantPoolArray::Type type = GetConstantPoolType(rmode);
-
- // Update constant pool if necessary and get the entry's offset.
- int offset;
- if (entry->merged_index_ == -1) {
- offset = offsets[entry->section_][type];
- offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
- if (type == ConstantPoolArray::INT64) {
-#if V8_TARGET_ARCH_PPC64
- constant_pool->set_at_offset(offset, rinfo.data());
-#else
- constant_pool->set_at_offset(offset, rinfo.data64());
- } else if (type == ConstantPoolArray::INT32) {
- constant_pool->set_at_offset(offset,
- static_cast<int32_t>(rinfo.data()));
-#endif
- } else if (type == ConstantPoolArray::CODE_PTR) {
- constant_pool->set_at_offset(offset,
- reinterpret_cast<Address>(rinfo.data()));
- } else {
- DCHECK(type == ConstantPoolArray::HEAP_PTR);
- constant_pool->set_at_offset(offset,
- reinterpret_cast<Object*>(rinfo.data()));
- }
- offset -= kHeapObjectTag;
- entry->merged_index_ = offset; // Stash offset for merged entries.
- } else {
- DCHECK(entry->merged_index_ < (entry - entries_.begin()));
- offset = entries_[entry->merged_index_].merged_index_;
- }
-
- // Patch load instruction with correct offset.
- Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
- }
-}
-#endif
}
} // namespace v8::internal
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index a3949556f3..bcc2d8f6b6 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -44,8 +44,8 @@
#include <vector>
#include "src/assembler.h"
+#include "src/compiler.h"
#include "src/ppc/constants-ppc.h"
-#include "src/serialize.h"
#define ABI_USES_FUNCTION_DESCRIPTORS \
(V8_HOST_ARCH_PPC && (V8_OS_AIX || \
@@ -108,11 +108,7 @@ struct Register {
static const int kAllocatableLowRangeBegin = 3;
static const int kAllocatableLowRangeEnd = 10;
static const int kAllocatableHighRangeBegin = 14;
-#if V8_OOL_CONSTANT_POOL
- static const int kAllocatableHighRangeEnd = 27;
-#else
static const int kAllocatableHighRangeEnd = 28;
-#endif
static const int kAllocatableContext = 30;
static const int kNumAllocatableLow =
@@ -178,14 +174,18 @@ struct Register {
"r25",
"r26",
"r27",
-#if !V8_OOL_CONSTANT_POOL
"r28",
-#endif
"cp",
};
return names[index];
}
+ static const RegList kAllocatable =
+ 1 << 3 | 1 << 4 | 1 << 5 | 1 << 6 | 1 << 7 | 1 << 8 | 1 << 9 | 1 << 10 |
+ 1 << 14 | 1 << 15 | 1 << 16 | 1 << 17 | 1 << 18 | 1 << 19 | 1 << 20 |
+ 1 << 21 | 1 << 22 | 1 << 23 | 1 << 24 | 1 << 25 | 1 << 26 | 1 << 27 |
+ 1 << 28 | 1 << 30;
+
static Register from_code(int code) {
Register r = {code};
return r;
@@ -242,7 +242,7 @@ const int kRegister_r24_Code = 24;
const int kRegister_r25_Code = 25;
const int kRegister_r26_Code = 26;
const int kRegister_r27_Code = 27;
-const int kRegister_r28_Code = 28; // constant pool pointer
+const int kRegister_r28_Code = 28;
const int kRegister_r29_Code = 29; // roots array pointer
const int kRegister_r30_Code = 30; // context pointer
const int kRegister_fp_Code = 31; // frame pointer
@@ -286,9 +286,6 @@ const Register fp = {kRegister_fp_Code};
// Give alias names to registers
const Register cp = {kRegister_r30_Code}; // JavaScript context pointer
const Register kRootRegister = {kRegister_r29_Code}; // Roots array pointer.
-#if V8_OOL_CONSTANT_POOL
-const Register kConstantPoolRegister = {kRegister_r28_Code}; // Constant pool
-#endif
// Double word FP register.
struct DoubleRegister {
@@ -467,13 +464,6 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
- // For mov. Return the number of actual instructions required to
- // load the operand into a register. This can be anywhere from
- // one (constant pool small section) to five instructions (full
- // 64-bit sequence).
- //
- // The value returned is only valid as long as no entries are added to the
- // constant pool between this call and the actual instruction being emitted.
bool must_output_reloc_info(const Assembler* assembler) const;
inline intptr_t immediate() const {
@@ -527,75 +517,21 @@ class MemOperand BASE_EMBEDDED {
};
-#if V8_OOL_CONSTANT_POOL
-// Class used to build a constant pool.
-class ConstantPoolBuilder BASE_EMBEDDED {
+class DeferredRelocInfo {
public:
- ConstantPoolBuilder();
- ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
- const RelocInfo& rinfo);
- void Relocate(intptr_t pc_delta);
- bool IsEmpty();
- Handle<ConstantPoolArray> New(Isolate* isolate);
- void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
-
- inline ConstantPoolArray::LayoutSection current_section() const {
- return current_section_;
- }
-
- // Rather than increasing the capacity of the ConstantPoolArray's
- // small section to match the longer (16-bit) reach of PPC's load
- // instruction (at the expense of a larger header to describe the
- // layout), the PPC implementation utilizes the extended section to
- // satisfy that reach. I.e. all entries (regardless of their
- // section) are reachable with a single load instruction.
- //
- // This implementation does not support an unlimited constant pool
- // size (which would require a multi-instruction sequence). [See
- // ARM commit e27ab337 for a reference on the changes required to
- // support the longer instruction sequence.] Note, however, that
- // going down that path will necessarily generate that longer
- // sequence for all extended section accesses since the placement of
- // a given entry within the section is not known at the time of
- // code generation.
- //
- // TODO(mbrandy): Determine whether there is a benefit to supporting
- // the longer sequence given that nops could be used for those
- // entries which are reachable with a single instruction.
- inline bool is_full() const { return !is_int16(size_); }
-
- inline ConstantPoolArray::NumberOfEntries* number_of_entries(
- ConstantPoolArray::LayoutSection section) {
- return &number_of_entries_[section];
- }
+ DeferredRelocInfo() {}
+ DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
+ : position_(position), rmode_(rmode), data_(data) {}
- inline ConstantPoolArray::NumberOfEntries* small_entries() {
- return number_of_entries(ConstantPoolArray::SMALL_SECTION);
- }
-
- inline ConstantPoolArray::NumberOfEntries* extended_entries() {
- return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
- }
+ int position() const { return position_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+ intptr_t data() const { return data_; }
private:
- struct ConstantPoolEntry {
- ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
- int merged_index)
- : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
-
- RelocInfo rinfo_;
- ConstantPoolArray::LayoutSection section_;
- int merged_index_;
- };
-
- ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
-
- uint32_t size_;
- std::vector<ConstantPoolEntry> entries_;
- ConstantPoolArray::LayoutSection current_section_;
- ConstantPoolArray::NumberOfEntries number_of_entries_[2];
+ int position_;
+ RelocInfo::Mode rmode_;
+ intptr_t data_;
};
-#endif
class Assembler : public AssemblerBase {
@@ -637,6 +573,12 @@ class Assembler : public AssemblerBase {
// but it may be bound only once.
void bind(Label* L); // binds an unbound label L to the current code position
+
+ // Links a label at the current pc_offset(). If already bound, returns the
+ // bound position. If already linked, returns the position of the prior link.
+ // Otherwise, returns the current pc_offset().
+ int link(Label* L);
+
// Determines if Label is bound and near enough so that a single
// branch instruction can be used to reach it.
bool is_near(Label* L, Condition cond);
@@ -644,24 +586,15 @@ class Assembler : public AssemblerBase {
// Returns the branch offset to the given label from the current code position
// Links the label to the current position if it is still unbound
// Manages the jump elimination optimization if the second parameter is true.
- int branch_offset(Label* L, bool jump_elimination_allowed);
+ int branch_offset(Label* L, bool jump_elimination_allowed) {
+ int position = link(L);
+ return position - pc_offset();
+ }
// Puts a label's target address at the given position.
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
-#if V8_OOL_CONSTANT_POOL
- INLINE(static bool IsConstantPoolLoadStart(Address pc));
- INLINE(static bool IsConstantPoolLoadEnd(Address pc));
- INLINE(static int GetConstantPoolOffset(Address pc));
- INLINE(static void SetConstantPoolOffset(Address pc, int offset));
-
- // Return the address in the constant pool of the code target address used by
- // the branch/call instruction at pc, or the object in a mov.
- INLINE(static Address target_constant_pool_address_at(
- Address pc, ConstantPoolArray* constant_pool));
-#endif
-
// Read/Modify the code target address in the branch/call instruction at pc.
INLINE(static Address target_address_at(Address pc,
ConstantPoolArray* constant_pool));
@@ -669,13 +602,13 @@ class Assembler : public AssemblerBase {
Address pc, ConstantPoolArray* constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ ConstantPoolArray* constant_pool = NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static void set_target_address_at(
Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
- ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+ ConstantPoolArray* constant_pool = NULL;
set_target_address_at(pc, constant_pool, target, icache_flush_mode);
}
@@ -695,6 +628,11 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Address instruction_payload, Code* code, Address target);
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
// Size of an instruction.
static const int kInstrSize = sizeof(Instr);
@@ -708,16 +646,11 @@ class Assembler : public AssemblerBase {
// Number of instructions to load an address via a mov sequence.
#if V8_TARGET_ARCH_PPC64
- static const int kMovInstructionsConstantPool = 2;
- static const int kMovInstructionsNoConstantPool = 5;
+ static const int kMovInstructions = 5;
+ static const int kTaggedLoadInstructions = 2;
#else
- static const int kMovInstructionsConstantPool = 1;
- static const int kMovInstructionsNoConstantPool = 2;
-#endif
-#if V8_OOL_CONSTANT_POOL
- static const int kMovInstructions = kMovInstructionsConstantPool;
-#else
- static const int kMovInstructions = kMovInstructionsNoConstantPool;
+ static const int kMovInstructions = 2;
+ static const int kTaggedLoadInstructions = 1;
#endif
// Distance between the instruction referring to the address of the call
@@ -747,15 +680,15 @@ class Assembler : public AssemblerBase {
// blrl
static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
- // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn()
+ // This is the length of the BreakLocation::SetDebugBreakAtReturn()
// code patch FIXED_SEQUENCE
- static const int kJSReturnSequenceInstructions =
- kMovInstructionsNoConstantPool + 3;
+ static const int kJSReturnSequenceInstructions = kMovInstructions + 3;
+ static const int kJSReturnSequenceLength =
+ kJSReturnSequenceInstructions * kInstrSize;
// This is the length of the code sequence from SetDebugBreakAtSlot()
// FIXED_SEQUENCE
- static const int kDebugBreakSlotInstructions =
- kMovInstructionsNoConstantPool + 2;
+ static const int kDebugBreakSlotInstructions = kMovInstructions + 2;
static const int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
@@ -1076,11 +1009,26 @@ class Assembler : public AssemblerBase {
void cmplw(Register src1, Register src2, CRegister cr = cr7);
void mov(Register dst, const Operand& src);
+ void bitwise_mov(Register dst, intptr_t value);
+ void bitwise_mov32(Register dst, int32_t value);
+ void bitwise_add32(Register dst, Register src, int32_t value);
// Load the position of the label relative to the generated code object
// pointer in a register.
void mov_label_offset(Register dst, Label* label);
+ // dst = base + label position + delta
+ void add_label_offset(Register dst, Register base, Label* label,
+ int delta = 0);
+
+ // Load the address of the label in a register and associate with an
+ // internal reference relocation.
+ void mov_label_addr(Register dst, Label* label);
+
+ // Emit the address of the label (i.e. a jump table entry) and associate with
+ // an internal reference relocation.
+ void emit_label_addr(Label* label);
+
// Multiply instructions
void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
RCBit r = LeaveRC);
@@ -1283,13 +1231,14 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
+ void RecordDeoptReason(const int reason, const SourcePosition position);
// Writes a single byte or word of data in the code stream. Used
// for inline tables, e.g., jump-tables.
void db(uint8_t data);
void dd(uint32_t data);
- void emit_ptr(uintptr_t data);
+ void emit_ptr(intptr_t data);
+ void emit_double(double data);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
@@ -1335,22 +1284,12 @@ class Assembler : public AssemblerBase {
void BlockTrampolinePoolFor(int instructions);
void CheckTrampolinePool();
- int instructions_required_for_mov(const Operand& x) const;
-
-#if V8_OOL_CONSTANT_POOL
- // Decide between using the constant pool vs. a mov immediate sequence.
- bool use_constant_pool_for_mov(const Operand& x, bool canOptimize) const;
-
// The code currently calls CheckBuffer() too often. This has the side
// effect of randomly growing the buffer in the middle of multi-instruction
// sequences.
- // MacroAssembler::LoadConstantPoolPointerRegister() includes a relocation
- // and multiple instructions. We cannot grow the buffer until the
- // relocation and all of the instructions are written.
//
// This function allows outside callers to check and grow the buffer
void EnsureSpaceFor(int space_needed);
-#endif
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
@@ -1358,23 +1297,7 @@ class Assembler : public AssemblerBase {
// Generate the constant pool for the generated code.
void PopulateConstantPool(ConstantPoolArray* constant_pool);
-#if V8_OOL_CONSTANT_POOL
- bool is_constant_pool_full() const {
- return constant_pool_builder_.is_full();
- }
-
- bool use_extended_constant_pool() const {
- return constant_pool_builder_.current_section() ==
- ConstantPoolArray::EXTENDED_SECTION;
- }
-#endif
-
-#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
- static void RelocateInternalReference(
- Address pc, intptr_t delta, Address code_start,
- ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
- static int DecodeInternalReference(Vector<char> buffer, Address pc);
-#endif
+ void EmitRelocations();
protected:
// Relocation for a type-recording IC has the AST id added to it. This
@@ -1392,13 +1315,7 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
- void RecordRelocInfo(const RelocInfo& rinfo);
-#if V8_OOL_CONSTANT_POOL
- ConstantPoolArray::LayoutSection ConstantPoolAddEntry(
- const RelocInfo& rinfo) {
- return constant_pool_builder_.AddEntry(this, rinfo);
- }
-#endif
+ void RecordRelocInfo(const DeferredRelocInfo& rinfo);
// Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) {
@@ -1407,9 +1324,7 @@ class Assembler : public AssemblerBase {
}
void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
-
void EndBlockTrampolinePool() { trampoline_pool_blocked_nesting_--; }
-
bool is_trampoline_pool_blocked() const {
return trampoline_pool_blocked_nesting_ > 0;
}
@@ -1439,17 +1354,14 @@ class Assembler : public AssemblerBase {
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
+ std::vector<DeferredRelocInfo> relocations_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
-#if V8_OOL_CONSTANT_POOL
- ConstantPoolBuilder constant_pool_builder_;
-#endif
-
// Code emission
inline void CheckBuffer();
- void GrowBuffer();
+ void GrowBuffer(int needed = 0);
inline void emit(Instr x);
inline void CheckTrampolinePoolQuick();
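With the constant-pool variants gone, the fixed-sequence lengths in this header reduce to simple arithmetic (assuming the usual 4-byte kInstrSize):

    // PPC64: kMovInstructions = 5 -> kJSReturnSequenceInstructions = 5 + 3 = 8,
    //        kJSReturnSequenceLength = 8 * kInstrSize = 32 bytes
    // PPC32: kMovInstructions = 2 -> kJSReturnSequenceInstructions = 2 + 3 = 5,
    //        kJSReturnSequenceLength = 5 * kInstrSize = 20 bytes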
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
index ca8704f9dd..c6f0336c4e 100644
--- a/deps/v8/src/ppc/builtins-ppc.cc
+++ b/deps/v8/src/ppc/builtins-ppc.cc
@@ -125,6 +125,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
}
+ __ mr(r6, r4);
// Run the native code for the Array function called as a normal function.
// tail call a stub
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
@@ -232,7 +233,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ push(function); // Preserve the function.
__ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
__ push(r3);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
}
@@ -252,7 +253,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ bind(&gc_required);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
}
@@ -262,7 +263,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
static void CallRuntimePassFunction(MacroAssembler* masm,
Runtime::FunctionId function_id) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push function as parameter to the runtime call.
__ Push(r4, r4);
@@ -353,7 +354,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Enter a construct frame.
{
- FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
if (create_memento) {
__ AssertUndefinedOrAllocationSite(r5, r7);
@@ -752,7 +753,7 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
CHECK(!FLAG_pretenuring_call_new);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
// Smi-tagged arguments count.
__ mr(r7, r3);
@@ -760,7 +761,9 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// receiver is the hole.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ Push(r7, ip);
+
+ // smi arguments count, new.target, receiver
+ __ Push(r7, r6, ip);
// Set up pointer to last argument.
__ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@@ -772,7 +775,8 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
// r7: number of arguments (smi-tagged)
// cr0: compare against zero of arguments
// sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
+ // sp[1]: new.target
+ // sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
@@ -784,6 +788,23 @@ void Builtins::Generate_JSConstructStubForDerived(MacroAssembler* masm) {
__ bdnz(&loop);
__ bind(&no_args);
+ __ addi(r3, r3, Operand(1));
+
+ // Handle step in.
+ Label skip_step_in;
+ ExternalReference debug_step_in_fp =
+ ExternalReference::debug_step_in_fp_address(masm->isolate());
+ __ mov(r5, Operand(debug_step_in_fp));
+ __ LoadP(r5, MemOperand(r5));
+ __ and_(r0, r5, r5, SetRC);
+ __ beq(&skip_step_in, cr0);
+
+ __ Push(r3, r4, r4);
+ __ CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
+ __ Pop(r3, r4);
+
+ __ bind(&skip_step_in);
+
// Call the function.
// r3: number of arguments
// r4: constructor function
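The step-in block added above reads, as pseudocode (editor's sketch; r3 holds the argument count and r4 the constructor):

    // if (*debug_step_in_fp_address != 0) {
    //   Push(r3, r4, r4);  // preserve argc and constructor; pass constructor
    //   CallRuntime(Runtime::kHandleStepInForDerivedConstructors, 1);
    //   Pop(r3, r4);
    // }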
@@ -896,12 +917,14 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Push a copy of the function onto the stack.
// Push function as parameter to the runtime call.
__ Push(r4, r4);
// Whether to compile in a background thread.
- __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+ __ LoadRoot(
+ r0, concurrent ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ push(r0);
__ CallRuntime(Runtime::kCompileOptimized, 2);
// Restore receiver.
@@ -1007,7 +1030,7 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
SaveFPRegsMode save_doubles) {
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Preserve registers across notification, this is important for compiled
// stubs that tail call the runtime on deopts passing their parameters in
@@ -1036,7 +1059,7 @@ void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Pass the function and deoptimization type to the runtime system.
__ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
__ push(r3);
@@ -1084,7 +1107,7 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Lookup the function in the JavaScript frame.
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Pass function as argument.
__ push(r3);
__ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
@@ -1102,12 +1125,8 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
-#if V8_OOL_CONSTANT_POOL
{
- ConstantPoolUnavailableScope constant_pool_unavailable(masm);
- __ LoadP(kConstantPoolRegister,
- FieldMemOperand(r3, Code::kConstantPoolOffset));
-#endif
+ __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -1116,17 +1135,13 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
DeoptimizationInputData::kOsrPcOffsetIndex)));
__ SmiUntag(r4);
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ add(r3, r3, r4);
- __ addi(r0, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ mtlr(r0);
+ // Compute the target address = code start + osr_offset
+ __ add(r0, r3, r4);
// And "return" to the OSR entry point of the function.
- __ Ret();
-#if V8_OOL_CONSTANT_POOL
+ __ mtlr(r0);
+ __ blr();
}
-#endif
}
@@ -1137,7 +1152,7 @@ void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
__ cmpl(sp, ip);
__ bge(&ok);
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kStackGuard, 0);
}
__ Jump(masm->isolate()->builtins()->OnStackReplacement(),
@@ -1228,7 +1243,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
{
// Enter an internal frame in order to preserve argument count.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r3);
__ Push(r3, r5);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -1351,50 +1366,99 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- const int kIndexOffset =
- StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
- const int kLimitOffset =
- StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
- const int kArgsOffset = 2 * kPointerSize;
- const int kRecvOffset = 3 * kPointerSize;
- const int kFunctionOffset = 4 * kPointerSize;
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+ const int calleeOffset) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ // Make r5 the space we have left. The stack might already be overflowed
+ // here which will cause r5 to become negative.
+ __ sub(r5, sp, r5);
+ // Check if the arguments will overflow the stack.
+ __ SmiToPtrArrayOffset(r0, r3);
+ __ cmp(r5, r0);
+ __ bgt(&okay); // Signed comparison.
+
+ // Out of stack space.
+ __ LoadP(r4, MemOperand(fp, calleeOffset));
+ __ Push(r4, r3);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+
+ __ bind(&okay);
+}
+
+
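
The helper's overflow predicate is plain pointer arithmetic: compare the space left below sp against the bytes the arguments will occupy. A hedged standalone sketch, with the argument count already untagged (names illustrative):

    #include <cstddef>
    #include <cstdint>

    bool ArgumentsFitOnStack(uintptr_t sp, uintptr_t real_stack_limit,
                             intptr_t arg_count, size_t pointer_size) {
      // Space we have left; may already be negative if sp is below the
      // limit, which the stub's signed bgt comparison catches.
      intptr_t available = static_cast<intptr_t>(sp - real_stack_limit);
      intptr_t needed = arg_count * static_cast<intptr_t>(pointer_size);
      return available > needed;
    }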
+static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int argumentsOffset,
+ const int indexOffset,
+ const int limitOffset) {
+ Label entry, loop;
+ __ LoadP(r3, MemOperand(fp, indexOffset));
+ __ b(&entry);
+
+ // Load the current argument from the arguments array and push it to the
+ // stack.
+ // r3: current argument index
+ __ bind(&loop);
+ __ LoadP(r4, MemOperand(fp, argumentsOffset));
+ __ Push(r4, r3);
+
+ // Call the runtime to access the property in the arguments array.
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ __ push(r3);
+
+ // Advance the index and store it back on the stack.
+ __ LoadP(r3, MemOperand(fp, indexOffset));
+ __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
+ __ StoreP(r3, MemOperand(fp, indexOffset));
+
+ // Test if the copy loop has finished copying all the elements from the
+ // arguments object.
+ __ bind(&entry);
+ __ LoadP(r4, MemOperand(fp, limitOffset));
+ __ cmp(r3, r4);
+ __ bne(&loop);
+
+ // On exit, the pushed arguments count is in r3, untagged.
+ __ SmiUntag(r3);
+}
+
+
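
Stripped of the Smi tagging and the Runtime::kGetProperty calls, the loop above is a half-open walk from the saved index to the saved limit; a hedged C++ paraphrase:

    #include <cstdint>
    #include <vector>

    std::vector<intptr_t> PushAppliedArguments(intptr_t index, intptr_t limit) {
      std::vector<intptr_t> pushed;  // stands in for the machine stack
      for (intptr_t i = index; i != limit; ++i) {
        pushed.push_back(i);  // stub: push GetProperty(arguments, i)
      }
      return pushed;  // the stub leaves the untagged count in r3
    }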
+// Used by FunctionApply and ReflectApply
+static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
+ const int kFormalParameters = targetIsArgument ? 3 : 2;
+ const int kStackSize = kFormalParameters + 1;
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ const int kFunctionOffset = kReceiverOffset + kPointerSize;
__ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
__ push(r3);
- __ LoadP(r3, MemOperand(fp, kArgsOffset)); // get the args array
+ __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array
__ push(r3);
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
- // Make r5 the space we have left. The stack might already be overflowed
- // here which will cause r5 to become negative.
- __ sub(r5, sp, r5);
- // Check if the arguments will overflow the stack.
- __ SmiToPtrArrayOffset(r0, r3);
- __ cmp(r5, r0);
- __ bgt(&okay); // Signed comparison.
-
- // Out of stack space.
- __ LoadP(r4, MemOperand(fp, kFunctionOffset));
- __ Push(r4, r3);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- // End of stack check.
+ if (targetIsArgument) {
+ __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ } else {
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ }
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current limit and index.
- __ bind(&okay);
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
__ li(r4, Operand::Zero());
__ Push(r3, r4); // limit and initial index.
// Get the receiver.
- __ LoadP(r3, MemOperand(fp, kRecvOffset));
+ __ LoadP(r3, MemOperand(fp, kReceiverOffset));
// Check that the function is a JS function (otherwise it must be a proxy).
Label push_receiver;
@@ -1462,43 +1526,18 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r3);
// Copy all arguments from the array to the stack.
- Label entry, loop;
- __ LoadP(r3, MemOperand(fp, kIndexOffset));
- __ b(&entry);
-
- // Load the current argument from the arguments array and push it to the
- // stack.
- // r3: current argument index
- __ bind(&loop);
- __ LoadP(r4, MemOperand(fp, kArgsOffset));
- __ Push(r4, r3);
-
- // Call the runtime to access the property in the arguments array.
- __ CallRuntime(Runtime::kGetProperty, 2);
- __ push(r3);
-
- // Use inline caching to access the arguments.
- __ LoadP(r3, MemOperand(fp, kIndexOffset));
- __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
- __ StoreP(r3, MemOperand(fp, kIndexOffset));
-
- // Test if the copy loop has finished copying all the elements from the
- // arguments object.
- __ bind(&entry);
- __ LoadP(r4, MemOperand(fp, kLimitOffset));
- __ cmp(r3, r4);
- __ bne(&loop);
+ Generate_PushAppliedArguments(masm, kArgumentsOffset, kIndexOffset,
+ kLimitOffset);
// Call the function.
Label call_proxy;
ParameterCount actual(r3);
- __ SmiUntag(r3);
__ LoadP(r4, MemOperand(fp, kFunctionOffset));
__ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
__ bne(&call_proxy);
__ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
- __ LeaveFrame(StackFrame::INTERNAL, 3 * kPointerSize);
+ __ LeaveFrame(StackFrame::INTERNAL, kStackSize * kPointerSize);
__ blr();
// Call the function proxy.
@@ -1512,11 +1551,90 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Tear down the internal frame and remove function, receiver and args.
}
- __ addi(sp, sp, Operand(3 * kPointerSize));
+ __ addi(sp, sp, Operand(kStackSize * kPointerSize));
__ blr();
}
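
The three offsets above chain upward from the saved fp and return PC. Worked values for a 64-bit target, where all three sizes are 8 bytes (illustrative):

    #include <cstdio>

    int main() {
      const int kPointerSize = 8, kFPOnStackSize = 8, kPCOnStackSize = 8;
      const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;  // fp+16
      const int kReceiverOffset = kArgumentsOffset + kPointerSize;   // fp+24
      const int kFunctionOffset = kReceiverOffset + kPointerSize;    // fp+32
      std::printf("args=%d recv=%d fn=%d\n", kArgumentsOffset,
                  kReceiverOffset, kFunctionOffset);
      return 0;
    }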
+static void Generate_ConstructHelper(MacroAssembler* masm) {
+ const int kFormalParameters = 3;
+ const int kStackSize = kFormalParameters + 1;
+
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
+ const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
+ const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+
+ // If new.target is not supplied, default it to the constructor.
+ Label validate_arguments;
+ __ LoadP(r3, MemOperand(fp, kNewTargetOffset));
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ bne(&validate_arguments);
+ __ LoadP(r3, MemOperand(fp, kFunctionOffset));
+ __ StoreP(r3, MemOperand(fp, kNewTargetOffset));
+
+ // Validate arguments
+ __ bind(&validate_arguments);
+ __ LoadP(r3, MemOperand(fp, kFunctionOffset)); // get the function
+ __ push(r3);
+ __ LoadP(r3, MemOperand(fp, kArgumentsOffset)); // get the args array
+ __ push(r3);
+ __ LoadP(r3, MemOperand(fp, kNewTargetOffset)); // get the new.target
+ __ push(r3);
+ __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
+
+ // Push current limit and index.
+ const int kIndexOffset =
+ StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+ __ li(r4, Operand::Zero());
+ __ Push(r3, r4); // limit and initial index.
+ // Push newTarget and callee functions
+ __ LoadP(r3, MemOperand(fp, kNewTargetOffset));
+ __ push(r3);
+ __ LoadP(r3, MemOperand(fp, kFunctionOffset));
+ __ push(r3);
+
+ // Copy all arguments from the array to the stack.
+ Generate_PushAppliedArguments(masm, kArgumentsOffset, kIndexOffset,
+ kLimitOffset);
+
+ // Use undefined feedback vector
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ LoadP(r4, MemOperand(fp, kFunctionOffset));
+
+ // Call the function.
+ CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ // Leave internal frame.
+ }
+ __ addi(sp, sp, Operand(kStackSize * kPointerSize));
+ __ blr();
+}
+
+
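
The defaulting at the top of Generate_ConstructHelper matches Reflect.construct(target, args), where an omitted new.target falls back to the target itself; a one-line hedged paraphrase (the Value type is illustrative):

    struct Value { bool is_undefined; };

    Value DefaultNewTarget(Value new_target, Value constructor) {
      // Undefined new.target on the stack is overwritten with the constructor.
      return new_target.is_undefined ? constructor : new_target;
    }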
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, false);
+}
+
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, true);
+}
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ Generate_ConstructHelper(masm);
+}
+
+
static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
Label* stack_overflow) {
// ----------- S t a t e -------------
@@ -1543,11 +1661,7 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ mflr(r0);
__ push(r0);
-#if V8_OOL_CONSTANT_POOL
- __ Push(fp, kConstantPoolRegister, r7, r4, r3);
-#else
__ Push(fp, r7, r4, r3);
-#endif
__ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize));
}
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 0226ffbf57..589f6d825e 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -12,6 +12,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -110,7 +111,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
int param_count = descriptor.GetEnvironmentParameterCount();
{
// Call the runtime system in a fresh internal frame.
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
r3.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
// Push arguments
@@ -1070,22 +1071,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// know where the return address is. The CEntryStub is unmovable, so
// we can store the address on the stack to be able to find it again and
// we never have to restore it, because it will not change.
- // Compute the return address in lr to return to after the jump below. Pc is
- // already at '+ 8' from the current instruction but return is after three
- // instructions so add another 4 to pc to get the return address.
- {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
- Label here;
- __ b(&here, SetLK);
- __ bind(&here);
- __ mflr(r8);
-
- // Constant used below is dependent on size of Call() macro instructions
- __ addi(r0, r8, Operand(20));
-
- __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
- __ Call(target);
- }
+ Label after_call;
+ __ mov_label_addr(r0, &after_call);
+ __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+ __ Call(target);
+ __ bind(&after_call);
#if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
// If return value is on the stack, pop it to registers.
@@ -1110,13 +1100,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r3, Heap::kExceptionRootIndex);
__ beq(&exception_returned);
- ExternalReference pending_exception_address(Isolate::kPendingExceptionAddress,
- isolate());
-
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
+
__ mov(r5, Operand(pending_exception_address));
__ LoadP(r5, MemOperand(r5));
__ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
@@ -1137,25 +1127,53 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
- // Retrieve the pending exception.
- __ mov(r5, Operand(pending_exception_address));
- __ LoadP(r3, MemOperand(r5));
-
- // Clear the pending exception.
- __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
- __ StoreP(r6, MemOperand(r5));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- Label throw_termination_exception;
- __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
- __ beq(&throw_termination_exception);
-
- // Handle normal exception.
- __ Throw(r3);
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set r3 to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, r3);
+ __ li(r3, Operand::Zero());
+ __ li(r4, Operand::Zero());
+ __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(find_handler, 3);
+ }
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(r3);
+ // Retrieve the handler context, SP and FP.
+ __ mov(cp, Operand(pending_handler_context_address));
+ __ LoadP(cp, MemOperand(cp));
+ __ mov(sp, Operand(pending_handler_sp_address));
+ __ LoadP(sp, MemOperand(sp));
+ __ mov(fp, Operand(pending_handler_fp_address));
+ __ LoadP(fp, MemOperand(fp));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (cp == 0) for non-JS frames.
+ Label skip;
+ __ cmpi(cp, Operand::Zero());
+ __ beq(&skip);
+ __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&skip);
+
+ // Compute the handler entry address and jump to it.
+ __ mov(r4, Operand(pending_handler_code_address));
+ __ LoadP(r4, MemOperand(r4));
+ __ mov(r5, Operand(pending_handler_offset_address));
+ __ LoadP(r5, MemOperand(r5));
+ __ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
+ __ add(ip, r4, r5);
+ __ Jump(ip);
}
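
Instead of rethrowing from the stub, the new path asks Runtime::kFindExceptionHandler to fill in per-isolate "pending handler" slots and then installs them. A hedged mirror of those slots and the final entry computation (the struct is illustrative; field names follow the ExternalReferences above):

    #include <cstdint>

    struct PendingHandler {
      uintptr_t context;  // 0 for non-JS handler frames
      uintptr_t code;     // address of the handler's Code object
      intptr_t offset;    // handler entry offset within that code
      uintptr_t fp;
      uintptr_t sp;
    };

    uintptr_t HandlerEntry(const PendingHandler& h, int code_header_size,
                           int heap_object_tag) {
      // ip = code start + offset, exactly as the stub computes before Jump.
      return h.code + (code_header_size - heap_object_tag) + h.offset;
    }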
@@ -1195,11 +1213,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r7: argv
__ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ push(r0);
-#if V8_OOL_CONSTANT_POOL
- __ mov(kConstantPoolRegister,
- Operand(isolate()->factory()->empty_constant_pool_array()));
- __ push(kConstantPoolRegister);
-#endif
int marker = type();
__ LoadSmiLiteral(r0, Smi::FromInt(marker));
__ push(r0);
@@ -1236,7 +1249,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
// field in the JSEnv and return a failure sentinel. Coming in here the
- // fp will be invalid because the PushTryHandler below sets it to 0 to
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
@@ -1245,11 +1258,10 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ LoadRoot(r3, Heap::kExceptionRootIndex);
__ b(&exit);
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
- // Must preserve r0-r4, r5-r7 are available. (needs update for PPC)
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ // Must preserve r3-r7.
+ __ PushStackHandler();
// If an exception not caught by another handler occurs, this handler
// returns control to the code after the b(&invoke) above, which
// restores all kCalleeSaved registers (including cp and fp) to their
@@ -1288,7 +1300,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ bctrl(); // make the call
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ bind(&exit); // r3 holds result
// Check if the current stack frame is marked as the outermost JS frame.
@@ -1347,14 +1359,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
const Register scratch = r5;
Register scratch3 = no_reg;
-// delta = mov + unaligned LoadP + cmp + bne
-#if V8_TARGET_ARCH_PPC64
- const int32_t kDeltaToLoadBoolResult =
- (Assembler::kMovInstructions + 4) * Assembler::kInstrSize;
-#else
+ // delta = mov + tagged LoadP + cmp + bne
const int32_t kDeltaToLoadBoolResult =
- (Assembler::kMovInstructions + 3) * Assembler::kInstrSize;
-#endif
+ (Assembler::kMovInstructions + Assembler::kTaggedLoadInstructions + 2) *
+ Assembler::kInstrSize;
Label slow, loop, is_instance, is_not_instance, not_js_object;
@@ -1514,7 +1522,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
__ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
} else {
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r3, r4);
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
}
@@ -1584,7 +1592,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ Ret();
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -1593,6 +1601,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ CHECK(!has_new_target());
// The displacement is the offset of the last parameter (if any)
// relative to the frame pointer.
const int kDisplacement =
@@ -1653,6 +1662,8 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
// sp[1] : receiver displacement
// sp[2] : function
+ CHECK(!has_new_target());
+
// Check if the calling frame is an arguments adaptor frame.
Label runtime;
__ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -1683,6 +1694,8 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
// r9 : allocated object (tagged)
// r11 : mapped parameter count (tagged)
+ CHECK(!has_new_target());
+
__ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
// r4 = parameter count (tagged)
@@ -1965,6 +1978,14 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
// Patch the arguments.length and the parameters pointer.
__ bind(&adaptor_frame);
__ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ if (has_new_target()) {
+ __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ Label skip_decrement;
+ __ beq(&skip_decrement);
+ // Subtract 1 from smi-tagged arguments count.
+ __ SubSmiLiteral(r4, r4, Smi::FromInt(1), r0);
+ __ bind(&skip_decrement);
+ }
__ StoreP(r4, MemOperand(sp, 0));
__ SmiToPtrArrayOffset(r6, r4);
__ add(r6, r5, r6);
@@ -2051,12 +2072,37 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
}
+void RestParamAccessStub::GenerateNew(MacroAssembler* masm) {
+ // Stack layout on entry.
+ // sp[0] : index of rest parameter
+ // sp[4] : number of parameters
+ // sp[8] : receiver displacement
+
+ Label runtime;
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ StoreP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ SmiToPtrArrayOffset(r6, r4);
+ __ add(r6, r5, r6);
+ __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ StoreP(r6, MemOperand(sp, 2 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewRestParam, 3, 1);
+}
+
+
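
When the caller sits behind an arguments adaptor frame, the new stub patches the parameter count and recomputes the parameters pointer from that frame before tail-calling Runtime::kNewRestParam. The pointer arithmetic, as a hedged sketch:

    #include <cstdint>

    uintptr_t RestParametersPointer(uintptr_t adaptor_fp, intptr_t actual_argc,
                                    int pointer_size, int caller_sp_offset) {
      // r6 = adaptor fp + argc * kPointerSize + kCallerSPOffset
      return adaptor_fp + actual_argc * pointer_size + caller_sp_offset;
    }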
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to the runtime if native RegExp is not selected at
// compile time, or if the regexp entry in generated code has been turned
// off at runtime or at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -2323,20 +2369,20 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ LeaveExitFrame(false, no_reg, true);
- // r3: result
+ // r3: result (int32)
// subject: subject string (callee saved)
// regexp_data: RegExp data (callee saved)
// last_match_info_elements: Last match info elements (callee saved)
// Check the result.
Label success;
- __ cmpi(r3, Operand(1));
+ __ cmpwi(r3, Operand(1));
// We expect exactly one result since we force the called regexp to behave
// as non-global.
__ beq(&success);
Label failure;
- __ cmpi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
+ __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
__ beq(&failure);
- __ cmpi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+ __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
// If not exception it can only be retry. Handle that in the runtime system.
__ bne(&runtime);
// Result must now be exception. If there is no pending exception already a
@@ -2350,18 +2396,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ cmp(r3, r4);
__ beq(&runtime);
- __ StoreP(r4, MemOperand(r5, 0)); // Clear pending exception.
-
- // Check if the exception is a termination. If so, throw as uncatchable.
- __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
-
- Label termination_exception;
- __ beq(&termination_exception);
-
- __ Throw(r3);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(r3);
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ bind(&failure);
// For failure and exception return null.
@@ -2450,7 +2486,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (6) Not a long external string? If yes, go to (8).
@@ -2562,7 +2598,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Create an AllocationSite if we don't already have it, store it in the
// slot.
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Arguments register must be smi-tagged to call out.
__ SmiTag(r3);
@@ -2648,7 +2684,7 @@ static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) {
static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
// Wrap the receiver and patch it back onto the stack.
{
- FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
__ Push(r4, r6);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ pop(r4);
@@ -2760,7 +2796,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
// Pass function as original constructor.
- __ mr(r6, r4);
+ if (IsSuperConstructorCall()) {
+ __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
+ __ addi(r7, r7, Operand(kPointerSize));
+ __ LoadPX(r6, MemOperand(sp, r7));
+ } else {
+ __ mr(r6, r4);
+ }
// Jump to the function-specific construct stub.
Register jmp_reg = r7;
@@ -2823,6 +2865,7 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
__ bne(&miss);
__ mr(r5, r7);
+ __ mr(r6, r4);
ArrayConstructorStub stub(masm->isolate(), arg_count());
__ TailCallStub(&stub);
@@ -2959,7 +3002,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
// r6 - slot
// r4 - function
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
__ Push(r4);
__ CallStub(&create_stub);
@@ -2987,7 +3030,7 @@ void CallICStub::Generate(MacroAssembler* masm) {
void CallICStub::GenerateMiss(MacroAssembler* masm) {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Push the function and feedback info.
__ Push(r4, r5, r6);
@@ -3038,7 +3081,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ MacroAssembler* masm, EmbedMode embed_mode,
+ const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
// Index is not a smi.
@@ -3047,8 +3091,13 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
- __ push(object_);
- __ push(index_); // Consumed by runtime conversion function.
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister(), object_, index_);
+ } else {
+ // index_ is consumed by runtime conversion function.
+ __ Push(object_, index_);
+ }
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
} else {
@@ -3059,7 +3108,12 @@ void StringCharCodeAtGenerator::GenerateSlow(
// Save the conversion result before the pop instructions below
// have a chance to overwrite it.
__ Move(index_, r3);
- __ pop(object_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(VectorLoadICDescriptor::VectorRegister(),
+ VectorLoadICDescriptor::SlotRegister(), object_);
+ } else {
+ __ pop(object_);
+ }
// Reload the instance type.
__ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
__ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
@@ -3371,7 +3425,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// r3: original string
@@ -3504,8 +3558,8 @@ void StringHelper::GenerateCompareFlatOneByteStrings(
// Conditionally update the result based on either length_delta or
// the last comparison performed in the loop above.
if (CpuFeatures::IsSupported(ISELECT)) {
- __ li(r4, Operand(GREATER));
- __ li(r5, Operand(LESS));
+ __ LoadSmiLiteral(r4, Smi::FromInt(GREATER));
+ __ LoadSmiLiteral(r5, Smi::FromInt(LESS));
__ isel(eq, r3, r0, r4);
__ isel(lt, r3, r5, r3);
__ Ret();
@@ -3584,7 +3638,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@@ -3890,7 +3944,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ bind(&miss);
@@ -3945,7 +3999,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference miss =
ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r4, r3);
__ Push(r4, r3);
__ LoadSmiLiteral(r0, Smi::FromInt(op()));
@@ -4509,15 +4563,15 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawLoadStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
- __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawKeyedLoadStub stub(isolate());
+ stub.GenerateForTrampoline(masm);
}
@@ -4535,6 +4589,248 @@ void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
}
+void VectorRawLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register feedback, Register scratch1,
+ Register scratch2, Register scratch3,
+ bool is_polymorphic, Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ Register receiver_map = scratch1;
+ Register cached_map = scratch2;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ bind(&compare_map);
+ __ LoadP(cached_map,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+ __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ cmp(receiver_map, cached_map);
+ __ bne(&start_polymorphic);
+ // Found the map; now call its handler.
+ Register handler = feedback;
+ __ LoadP(handler,
+ FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+
+ Register length = scratch3;
+ __ bind(&start_polymorphic);
+ __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
+ if (!is_polymorphic) {
+ // If the IC could be monomorphic we have to make sure we don't go past the
+ // end of the feedback array.
+ __ CmpSmiLiteral(length, Smi::FromInt(2), r0);
+ __ beq(miss);
+ }
+
+ Register too_far = length;
+ Register pointer_reg = feedback;
+
+ // +-----+------+------+-----+-----+ ... ----+
+ // | map | len | wm0 | h0 | wm1 | hN |
+ // +-----+------+------+-----+-----+ ... ----+
+ // 0 1 2 len-1
+ // ^ ^
+ // | |
+ // pointer_reg too_far
+ // aka feedback scratch3
+ // also need receiver_map (aka scratch1)
+ // use cached_map (scratch2) to look in the weak map values.
+ __ SmiToPtrArrayOffset(r0, length);
+ __ add(too_far, feedback, r0);
+ __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(pointer_reg, feedback,
+ Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
+
+ __ bind(&next_loop);
+ __ LoadP(cached_map, MemOperand(pointer_reg));
+ __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
+ __ cmp(receiver_map, cached_map);
+ __ bne(&prepare_next);
+ __ LoadP(handler, MemOperand(pointer_reg, kPointerSize));
+ __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ __ bind(&prepare_next);
+ __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
+ __ cmp(pointer_reg, too_far);
+ __ blt(&next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ b(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ b(&compare_map);
+}
+
+
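
The probe over the layout diagrammed above is, with the weak-cell dereference elided, a linear scan of (map, handler) pairs starting at element 2; a hedged C++ equivalent:

    #include <cstddef>

    // entries points at wm0 in [map, len, wm0, h0, wm1, h1, ...];
    // pair_count is (len - 2) / 2. Returns nullptr on miss.
    void* ProbeFeedback(void* receiver_map, void* const* entries,
                        size_t pair_count) {
      for (size_t i = 0; i < pair_count; ++i) {
        if (entries[2 * i] == receiver_map) return entries[2 * i + 1];
      }
      return nullptr;  // stub branches to the miss label
    }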
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register weak_cell, Register scratch,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label compare_smi_map;
+ Register receiver_map = scratch;
+ Register cached_map = weak_cell;
+
+ // Move the weak map into the weak_cell register.
+ __ LoadP(cached_map, FieldMemOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ cmp(cached_map, receiver_map);
+ __ bne(miss);
+
+ Register handler = weak_cell;
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(handler, vector, r0);
+ __ LoadP(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ __ bind(&compare_smi_map);
+ __ CompareRoot(weak_cell, Heap::kHeapNumberMapRootIndex);
+ __ bne(miss);
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(handler, vector, r0);
+ __ LoadP(handler,
+ FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
+ __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(ip);
+}
+
+
+void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r4
+ Register name = VectorLoadICDescriptor::NameRegister(); // r5
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // r6
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // r3
+ Register feedback = r7;
+ Register scratch1 = r8;
+
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
+ __ bne(&try_array);
+ HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback, scratch1,
+ &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+ HandleArrayCases(masm, receiver, name, vector, slot, feedback, scratch1, r9,
+ r10, true, &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags,
+ false, receiver, name, feedback,
+ scratch1, r9, r10);
+
+ __ bind(&miss);
+ LoadIC::GenerateMiss(masm);
+}
+
+
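
GenerateImpl dispatches on the type of the feedback slot in a fixed order; a hedged outline of that decision chain:

    enum class FeedbackKind { kWeakCell, kFixedArray, kMegamorphicSymbol, kOther };

    const char* LoadDispatch(FeedbackKind kind) {
      switch (kind) {
        case FeedbackKind::kWeakCell:
          return "monomorphic: compare one cached map, jump to its handler";
        case FeedbackKind::kFixedArray:
          return "polymorphic: walk the map/handler pairs";
        case FeedbackKind::kMegamorphicSymbol:
          return "megamorphic: probe the stub cache";
        default:
          return "miss: fall through to LoadIC::GenerateMiss";
      }
    }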
+void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // r4
+ Register key = VectorLoadICDescriptor::NameRegister(); // r5
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // r6
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // r3
+ Register feedback = r7;
+ Register scratch1 = r8;
+
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
+ __ CompareRoot(scratch1, Heap::kWeakCellMapRootIndex);
+ __ bne(&try_array);
+ HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, scratch1,
+ &miss);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
+ __ bne(&not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r9,
+ r10, true, &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ bne(&try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, feedback);
+ __ bne(&miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ SmiToPtrArrayOffset(r0, slot);
+ __ add(feedback, vector, r0);
+ __ LoadP(feedback,
+ FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, scratch1, r9,
+ r10, false, &miss);
+
+ __ bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
PredictableCodeSizeScope predictable(masm,
@@ -4802,6 +5098,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// -- r3 : argc (only if argument_count() == ANY)
// -- r4 : constructor
// -- r5 : AllocationSite or undefined
+ // -- r6 : original constructor
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
@@ -4822,6 +5119,10 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ AssertUndefinedOrAllocationSite(r5, r7);
}
+ Label subclassing;
+ __ cmp(r6, r4);
+ __ bne(&subclassing);
+
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
@@ -4835,6 +5136,27 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+
+ __ bind(&subclassing);
+ __ push(r4);
+ __ push(r6);
+
+ // Adjust argc.
+ switch (argument_count()) {
+ case ANY:
+ case MORE_THAN_ONE:
+ __ addi(r3, r3, Operand(2));
+ break;
+ case NONE:
+ __ li(r3, Operand(2));
+ break;
+ case ONE:
+ __ li(r3, Operand(3));
+ break;
+ }
+
+ __ JumpToExternalReference(
+ ExternalReference(Runtime::kArrayConstructorWithSubclassing, isolate()));
}
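
The argc fixup accounts for the two values (constructor and original constructor) just pushed before the runtime call; worked out per case (hedged sketch):

    int SubclassingArgc(int incoming_argc, bool known_none, bool known_one) {
      if (known_none) return 2;      // NONE: 0 args + 2 pushes
      if (known_one) return 3;       // ONE:  1 arg  + 2 pushes
      return incoming_argc + 2;      // ANY / MORE_THAN_ONE: r3 += 2
    }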
@@ -4997,7 +5319,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
Label promote_scheduled_exception;
- Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label return_value_loaded;
@@ -5019,15 +5340,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(r15, r0);
__ bne(&delete_allocated_handles);
- // Check if the function scheduled an exception.
+ // Leave the API exit frame.
__ bind(&leave_exit_frame);
- __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
- __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
- __ LoadP(r15, MemOperand(r15));
- __ cmp(r14, r15);
- __ bne(&promote_scheduled_exception);
- __ bind(&exception_handled);
-
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
__ LoadP(cp, *context_restore_operand);
@@ -5039,15 +5353,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ mov(r14, Operand(stack_space));
}
__ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
+ __ LoadP(r15, MemOperand(r15));
+ __ cmp(r14, r15);
+ __ bne(&promote_scheduled_exception);
+
__ blr();
+ // Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallExternalReference(
- ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
- }
- __ jmp(&exception_handled);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
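
The net effect of this hunk is a reordering: the exit frame is torn down first, the scheduled-exception check happens afterwards, and rethrow becomes a tail call with no jump back. A hedged control-flow mirror (the helpers are hypothetical stand-ins for the assembler sequences):

    inline void RestoreContext() {}             // hypothetical stand-ins
    inline void LeaveExitFrame() {}
    inline void PromoteScheduledException() {}  // TailCallRuntime in the stub

    void ApiReturnPath(bool restore_context, bool has_scheduled_exception) {
      if (restore_context) RestoreContext();
      LeaveExitFrame();  // now precedes the scheduled-exception check
      if (has_scheduled_exception) {
        PromoteScheduledException();  // tail call; no return to this code
        return;
      }
      // __ blr(): normal return to the caller
    }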
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
index 93d32c2bc6..c0398aebed 100644
--- a/deps/v8/src/ppc/codegen-ppc.cc
+++ b/deps/v8/src/ppc/codegen-ppc.cc
@@ -646,9 +646,9 @@ void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
- ConstantPoolArray* constant_pool = NULL;
- Address target_address = Assembler::target_address_at(
- sequence + kCodeAgingTargetDelta, constant_pool);
+ Code* code = NULL;
+ Address target_address =
+ Assembler::target_address_at(sequence + kCodeAgingTargetDelta, code);
Code* stub = GetCodeFromTargetAddress(target_address);
GetCodeAgeAndParity(stub, age, parity);
}
diff --git a/deps/v8/src/ppc/debug-ppc.cc b/deps/v8/src/ppc/debug-ppc.cc
index 8106853134..f59f6371de 100644
--- a/deps/v8/src/ppc/debug-ppc.cc
+++ b/deps/v8/src/ppc/debug-ppc.cc
@@ -12,12 +12,7 @@
namespace v8 {
namespace internal {
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtReturn() {
+void BreakLocation::SetDebugBreakAtReturn() {
// Patch the code changing the return from JS function sequence from
//
// LeaveFrame
@@ -31,7 +26,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
// blrl
// bkpt
//
- CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+ CodePatcher patcher(pc(), Assembler::kJSReturnSequenceInstructions);
Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
patcher.masm()->mov(
v8::internal::r0,
@@ -45,29 +40,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
}
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceInstructions);
-}
-
-
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
-}
-
-
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
-}
-
-
-void BreakLocationIterator::SetDebugBreakAtSlot() {
+void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
// Patch the code changing the debug break slot code from
//
@@ -83,7 +56,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
// mtlr r0
// blrl
//
- CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+ CodePatcher patcher(pc(), Assembler::kDebugBreakSlotInstructions);
Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
patcher.masm()->mov(
v8::internal::r0,
@@ -94,13 +67,6 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
}
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kDebugBreakSlotInstructions);
-}
-
-
#define __ ACCESS_MASM(masm)
@@ -108,7 +74,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
RegList object_regs,
RegList non_object_regs) {
{
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ FrameScope scope(masm, StackFrame::INTERNAL);
// Load padding words on stack.
__ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
@@ -317,8 +283,7 @@ void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
__ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
kPointerSize));
- // Pop return address, frame and constant pool pointer (if
- // FLAG_enable_ool_constant_pool).
+ // Pop the return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
// Load context from the function.
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index ac1504c020..74c88e37a7 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -142,7 +142,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Unlike on ARM we don't save all the registers, just the useful ones.
@@ -172,6 +172,9 @@ void Deoptimizer::EntryGenerator::Generate() {
}
}
+ __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+ __ StoreP(fp, MemOperand(ip));
+
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
@@ -353,13 +356,8 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
-#if V8_OOL_CONSTANT_POOL
- DCHECK(FLAG_enable_ool_constant_pool);
- SetFrameSlot(offset, value);
-#else
// No out-of-line constant pool support.
UNREACHABLE();
-#endif
}
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
index 3472828eee..2486741350 100644
--- a/deps/v8/src/ppc/disasm-ppc.cc
+++ b/deps/v8/src/ppc/disasm-ppc.cc
@@ -988,6 +988,15 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x ",
instr->InstructionBits());
+#if ABI_USES_FUNCTION_DESCRIPTORS
+ // The first field will be identified as a jump table entry. We emit the rest
+ // of the structure as zero, so just skip past it.
+ if (instr->InstructionBits() == 0) {
+ Format(instr, "constant");
+ return Instruction::kInstrSize;
+ }
+#endif
+
switch (instr->OpcodeValue() << 26) {
case TWI: {
PrintSoftwareInterrupt(instr->SvcValue());
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc
index 4b52882b0c..00af7c9b01 100644
--- a/deps/v8/src/ppc/frames-ppc.cc
+++ b/deps/v8/src/ppc/frames-ppc.cc
@@ -21,38 +21,22 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
-#if V8_OOL_CONSTANT_POOL
- DCHECK(FLAG_enable_ool_constant_pool);
- return kConstantPoolRegister;
-#else
UNREACHABLE();
return no_reg;
-#endif
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
-#if V8_OOL_CONSTANT_POOL
- DCHECK(FLAG_enable_ool_constant_pool);
- return kConstantPoolRegister;
-#else
UNREACHABLE();
return no_reg;
-#endif
}
Object*& ExitFrame::constant_pool_slot() const {
-#if V8_OOL_CONSTANT_POOL
- DCHECK(FLAG_enable_ool_constant_pool);
- const int offset = ExitFrameConstants::kConstantPoolOffset;
- return Memory::Object_at(fp() + offset);
-#else
UNREACHABLE();
return Memory::Object_at(NULL);
-#endif
}
}
} // namespace v8::internal
diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h
index f00fa668a8..40a68b3a37 100644
--- a/deps/v8/src/ppc/frames-ppc.h
+++ b/deps/v8/src/ppc/frames-ppc.h
@@ -57,15 +57,8 @@ const int kNumCalleeSaved = 18;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
-// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 32;
-// Define the list of registers actually saved at safepoints.
-// Note that the number of saved registers may be smaller than the reserved
-// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
-const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
-
// The following constants describe the stack frame linkage area as
// defined by the ABI. Note that kNumRequiredStackFrameSlots must
// satisfy alignment requirements (rounding up if required).
@@ -123,13 +116,8 @@ class EntryFrameConstants : public AllStatic {
class ExitFrameConstants : public AllStatic {
public:
-#if V8_OOL_CONSTANT_POOL
- static const int kFrameSize = 3 * kPointerSize;
- static const int kConstantPoolOffset = -3 * kPointerSize;
-#else
static const int kFrameSize = 2 * kPointerSize;
static const int kConstantPoolOffset = 0; // Not used.
-#endif
static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
@@ -193,9 +181,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-inline void StackHandler::SetFp(Address slot, Address fp) {
- Memory::Address_at(slot) = fp;
-}
}
} // namespace v8::internal
diff --git a/deps/v8/src/ppc/full-codegen-ppc.cc b/deps/v8/src/ppc/full-codegen-ppc.cc
index 26503c8cd5..a12f17eba3 100644
--- a/deps/v8/src/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/ppc/full-codegen-ppc.cc
@@ -104,7 +104,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
@@ -200,9 +201,9 @@ void FullCodeGenerator::Generate() {
// Argument to NewContext is the function, which is still in r4.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
- if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ if (info->scope()->is_script_scope()) {
__ push(r4);
- __ Push(info->scope()->GetScopeInfo());
+ __ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
@@ -245,6 +246,35 @@ void FullCodeGenerator::Generate() {
}
}
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+
+ // Possibly allocate RestParameters
+ int rest_index;
+ Variable* rest_param = scope()->rest_parameter(&rest_index);
+ if (rest_param) {
+ Comment cmnt(masm_, "[ Allocate rest parameter array");
+
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
+ if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
+ --num_parameters;
+ ++rest_index;
+ }
+
+ __ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ mov(r5, Operand(Smi::FromInt(num_parameters)));
+ __ mov(r4, Operand(Smi::FromInt(rest_index)));
+ __ Push(r6, r5, r4);
+
+ RestParamAccessStub stub(isolate());
+ __ CallStub(&stub);
+
+ SetVar(rest_param, r3, r4, r5);
+ }
+
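
When new.target is present it occupies one trailing parameter slot, so the declared count drops by one and the rest index shifts up by one; a worked instance of the adjustment above (values illustrative):

    #include <cstdio>

    int main() {
      int num_parameters = 3, rest_index = 2;  // e.g. F(a, b, ...rest)
      bool has_new_target = true;              // subclass constructor
      if (has_new_target) { --num_parameters; ++rest_index; }
      std::printf("params=%d rest_index=%d\n", num_parameters, rest_index);
      return 0;  // prints params=2 rest_index=3
    }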
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
@@ -267,14 +297,14 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite the receiver and parameter count if the previous
// stack frame was an arguments adaptor frame.
ArgumentsAccessStub::Type type;
- if (is_strict(language_mode())) {
+ if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
} else if (function()->has_duplicate_parameters()) {
type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub stub(isolate(), type);
+ ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
SetVar(arguments, r3, r4, r5);
@@ -432,7 +462,11 @@ void FullCodeGenerator::EmitReturnSequence() {
// sequence.
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+ int32_t arg_count = info_->scope()->num_parameters() + 1;
+ if (IsSubclassConstructor(info_->function()->kind())) {
+ arg_count++;
+ }
+ int32_t sp_delta = arg_count * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
@@ -440,9 +474,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// With 64bit we may need nop() instructions to ensure we have
// enough space to SetDebugBreakAtReturn()
if (is_int16(sp_delta)) {
-#if !V8_OOL_CONSTANT_POOL
masm_->nop();
-#endif
masm_->nop();
}
#endif
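
The return-sequence delta now reserves one extra slot when the function is a subclass constructor; the arithmetic in isolation (hedged sketch):

    int ReturnSpDelta(int num_parameters, bool is_subclass_constructor,
                      int pointer_size) {
      int arg_count = num_parameters + 1;        // declared params + receiver
      if (is_subclass_constructor) arg_count++;  // + implicit new.target
      return arg_count * pointer_size;
    }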
@@ -1457,7 +1489,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
- CallLoadIC(CONTEXTUAL);
+ CallGlobalLoadIC(var->name());
context()->Plug(r3);
break;
}
@@ -2101,7 +2133,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
__ LoadP(r6, MemOperand(sp, 1 * kPointerSize)); // iter
__ Push(load_name, r6, r3); // "throw", iter, except
@@ -2112,16 +2143,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(r3); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
+ EnterTryBlock(expr->index(), &l_catch);
+ const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(r3); // result
__ b(&l_suspend);
__ bind(&l_continuation);
__ b(&l_resume);
__ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
+ const int generator_object_depth = kPointerSize + try_block_size;
__ LoadP(r3, MemOperand(sp, generator_object_depth));
__ push(r3); // g
+ __ Push(Smi::FromInt(expr->index())); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ LoadSmiLiteral(r4, Smi::FromInt(l_continuation.pos()));
__ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
@@ -2130,12 +2162,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mr(r4, cp);
__ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ pop(r3); // result
EmitReturnSequence();
__ bind(&l_resume); // received in r3
- __ PopTryHandler();
+ ExitTryBlock(expr->index());
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2256,13 +2288,7 @@ void FullCodeGenerator::EmitGeneratorResume(
Label slow_resume;
__ bne(&slow_resume, cr0);
__ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
-#if V8_OOL_CONSTANT_POOL
{
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- // Load the new code object's constant pool pointer.
- __ LoadP(kConstantPoolRegister,
- MemOperand(ip, Code::kConstantPoolOffset - Code::kHeaderSize));
-#endif
__ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
__ SmiUntag(r5);
__ add(ip, ip, r5);
@@ -2272,9 +2298,7 @@ void FullCodeGenerator::EmitGeneratorResume(
r0);
__ Jump(ip);
__ bind(&slow_resume);
-#if V8_OOL_CONSTANT_POOL
}
-#endif
} else {
__ beq(&call_resume, cr0);
}
@@ -2538,6 +2562,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
__ push(scratch);
EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+ // The static 'prototype' property is read-only. The non-computed property
+ // name case is handled in the parser. Since this is the only case where an
+ // own read-only property must be checked, we special-case it here to avoid
+ // doing the check for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ push(r3);
+ }
+
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@@ -2681,25 +2715,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(r3);
- __ mov(r3, Operand(var->name()));
- __ Push(cp, r3); // Context and name.
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackAllocated() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, r4);
- __ LoadP(r5, location);
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
- __ bne(&skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2716,6 +2731,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, r4);
+ __ LoadP(r6, location);
+ __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ bne(&const_error);
+ __ mov(r6, Operand(var->name()));
+ __ push(r6);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2737,8 +2767,32 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(var->mode() == CONST_LEGACY);
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(r3);
+ __ mov(r3, Operand(var->name()));
+ __ Push(cp, r3); // Context and name.
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, r4);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ }
+ // Silently ignore store in sloppy mode.
}
}
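// A standalone sketch, not V8 code, of the case ordering the rewritten
// EmitVariableAssignment uses: LET and CONST non-initializing stores get a
// hole (TDZ) check first, legacy const initialization stores only while the
// slot still holds the hole, and any other legacy-const store throws only in
// strict mode. Enum and function names here are illustrative.
#include <cstdio>
#include <stdexcept>

enum class Mode { VAR, LET, CONST, CONST_LEGACY };
enum class Op { ASSIGN, INIT_LET, INIT_CONST, INIT_CONST_LEGACY };

void Assign(Mode mode, Op op, bool hole, bool strict) {
  if (mode == Mode::LET && op != Op::INIT_LET) {
    if (hole) throw std::runtime_error("ReferenceError");  // TDZ check
  } else if (mode == Mode::CONST && op != Op::INIT_CONST) {
    if (hole) throw std::runtime_error("ReferenceError");  // TDZ first
    throw std::runtime_error("ConstAssignError");          // then const error
  } else if (mode != Mode::CONST_LEGACY || op == Op::INIT_CONST) {
    // Ordinary store.
  } else if (op == Op::INIT_CONST_LEGACY) {
    // Initialize only while the slot still holds the hole.
  } else if (strict) {
    throw std::runtime_error("ConstAssignError");
  }  // Sloppy-mode stores to a legacy const are silently ignored.
}

int main() {
  try {
    Assign(Mode::CONST, Op::ASSIGN, /*hole=*/false, /*strict=*/true);
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());  // prints ConstAssignError
  }
}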
@@ -2865,7 +2919,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ Push(isolate()->factory()->undefined_value());
+ __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ push(r0);
} else {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
@@ -2874,8 +2929,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ LoadP(ip, MemOperand(sp, 0));
- __ push(ip);
+ __ LoadP(r0, MemOperand(sp, 0));
+ __ push(r0);
__ StoreP(r3, MemOperand(sp, kPointerSize));
}
@@ -3033,8 +3088,7 @@ void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
}
-void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
- DCHECK(super_ref != NULL);
+void FullCodeGenerator::EmitLoadSuperConstructor() {
__ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Push(r3);
__ CallRuntime(Runtime::kGetPrototype, 1);
@@ -3225,20 +3279,12 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- SuperReference* super_ref = expr->expression()->AsSuperReference();
- EmitLoadSuperConstructor(super_ref);
- __ push(result_register());
-
- Variable* this_var = super_ref->this_var()->var();
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
- GetVar(r3, this_var);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- Label uninitialized_this;
- __ beq(&uninitialized_this);
- __ mov(r3, Operand(this_var->name()));
- __ push(r3);
- __ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&uninitialized_this);
+ EmitLoadSuperConstructor();
+ __ push(result_register());
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = expr->arguments();
@@ -3268,12 +3314,24 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
__ Move(r5, FeedbackVector());
__ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
- // TODO(dslomov): use a different stub and propagate new.target.
- CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+ CallConstructStub stub(isolate(), SUPER_CALL_RECORD_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+ __ Drop(1);
+
RecordJSReturnSite(expr);
+ SuperReference* super_ref = expr->expression()->AsSuperReference();
+ Variable* this_var = super_ref->this_var()->var();
+ GetVar(r4, this_var);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
+ Label uninitialized_this;
+ __ beq(&uninitialized_this);
+ __ mov(r4, Operand(this_var->name()));
+ __ push(r4);
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&uninitialized_this);
+
EmitVariableAssignment(this_var, Token::INIT_CONST);
context()->Plug(r3);
}
@@ -3742,8 +3800,9 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ LoadP(r3, FieldMemOperand(r3, Map::kConstructorOffset));
- __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
+ Register instance_type = r5;
+ __ GetMapConstructor(r3, r3, r4, instance_type);
+ __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
__ bne(&non_function_constructor);
// r3 now contains the constructor function. Grab the
@@ -4036,7 +4095,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ b(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4078,7 +4137,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ b(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4142,6 +4201,61 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
}
+void FullCodeGenerator::EmitDefaultConstructorCallSuper(CallRuntime* expr) {
+ Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
+ GetVar(result_register(), new_target_var);
+ __ Push(result_register());
+
+ EmitLoadSuperConstructor();
+ __ mr(r4, result_register());
+ __ Push(r4);
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor_frame, args_set_up, runtime;
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ beq(&adaptor_frame);
+
+ // The default constructor has no arguments, so no adaptor frame means no args.
+ __ li(r3, Operand::Zero());
+ __ b(&args_set_up);
+
+ // Copy arguments from adaptor frame.
+ {
+ __ bind(&adaptor_frame);
+ __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ SmiUntag(r3);
+
+ // Subtract 1 from arguments count, for new.target.
+ __ subi(r3, r3, Operand(1));
+
+ // Get arguments pointer in r5.
+ __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+ __ add(r5, r5, r0);
+ __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ Label loop;
+ __ mtctr(r3);
+ __ bind(&loop);
+ // Pre-decrement in order to skip receiver.
+ __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+ __ Push(r6);
+ __ bdnz(&loop);
+ }
+
+ __ bind(&args_set_up);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+
+ CallConstructStub stub(isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ context()->Plug(result_register());
+}
+
+
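// A standalone sketch, not V8 code, of the adaptor-frame copy loop above:
// the pointer starts at the receiver slot, so the pre-decrementing load
// (LoadPU) never reads the receiver itself. The slot layout below is a
// hypothetical stand-in for the real frame layout.
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> slots = {30, 20, 10, /*receiver*/ 99};
  int arg_count = 3;  // adaptor frame length minus one for new.target
  size_t p = slots.size() - 1;  // index of the receiver slot
  for (int i = 0; i < arg_count; ++i) {
    int value = slots[--p];  // pre-decrement, then load
    std::printf("push %d\n", value);
  }
}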
void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
RegExpConstructResultStub stub(isolate());
ZoneList<Expression*>* args = expr->arguments();
@@ -4198,7 +4312,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ bind(&not_found);
// Call runtime to perform the lookup.
__ Push(cache, key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ bind(&done);
context()->Plug(r3);
@@ -4505,18 +4619,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- if (expr->function() != NULL &&
- expr->function()->intrinsic_type == Runtime::INLINE) {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
__ LoadP(receiver, GlobalObjectOperand());
@@ -4540,7 +4647,6 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ StoreP(r3, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
- int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@@ -4555,15 +4661,29 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
context()->DropAndPlug(1, r3);
+
} else {
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(r3);
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(r3);
+ }
+ }
}
}
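// A standalone sketch, not V8 code, of the X-macro dispatch introduced
// above: the FOR_EACH_* list expands to one switch case per intrinsic, each
// forwarding to a dedicated Emit* routine, with a generic runtime-call
// fallback. The intrinsic names below are placeholders.
#include <cstdio>

#define FOR_EACH_INTRINSIC(V) V(StringCharAt) V(ClassOf)

enum FunctionId { kInlineStringCharAt, kInlineClassOf, kOther };

void EmitStringCharAt() { std::puts("inline StringCharAt"); }
void EmitClassOf() { std::puts("inline ClassOf"); }
void CallRuntimeGeneric() { std::puts("generic runtime call"); }

void VisitCallRuntime(FunctionId id) {
  switch (id) {
#define CALL_INTRINSIC_GENERATOR(Name) \
  case kInline##Name:                  \
    return Emit##Name();
    FOR_EACH_INTRINSIC(CALL_INTRINSIC_GENERATOR)
#undef CALL_INTRINSIC_GENERATOR
    default:
      CallRuntimeGeneric();
  }
}

int main() {
  VisitCallRuntime(kInlineClassOf);  // dispatches to EmitClassOf
  VisitCallRuntime(kOther);          // falls back to the generic path
}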
@@ -5206,19 +5326,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
__ mov(ip, Operand(pending_message_obj));
__ LoadP(r4, MemOperand(ip));
__ push(r4);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(ip, Operand(has_pending_message));
- __ lbz(r4, MemOperand(ip));
- __ SmiTag(r4);
- __ push(r4);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(ip, Operand(pending_message_script));
- __ LoadP(r4, MemOperand(ip));
- __ push(r4);
}
@@ -5226,19 +5333,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(r4));
// Restore pending message from stack.
__ pop(r4);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(ip, Operand(pending_message_script));
- __ StoreP(r4, MemOperand(ip));
-
- __ pop(r4);
- __ SmiUntag(r4);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(ip, Operand(has_pending_message));
- __ stb(r4, MemOperand(ip));
-
- __ pop(r4);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
@@ -5259,32 +5353,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth, int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ LoadP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- }
- __ PopTryHandler();
- __ b(finally_entry_, SetLK);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-#undef __
-
void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
BackEdgeState target_state,
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index f82d85ded0..01d0150340 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -227,6 +227,12 @@ void InternalArrayConstructorDescriptor::Initialize(
}
+void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {cp, r4, r3};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3};
data->Initialize(arraysize(registers), registers, NULL);
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc
index 4d17189c84..f6147c2196 100644
--- a/deps/v8/src/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/ppc/lithium-codegen-ppc.cc
@@ -7,6 +7,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -109,7 +110,7 @@ bool LCodeGen::GeneratePrologue() {
// r4: Callee's JS function.
// cp: Callee's context.
- // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
+ // pp: Callee's constant pool pointer (if enabled)
// fp: Caller's frame pointer.
// lr: Caller's pc.
// ip: Our own function entry (required by the prologue)
@@ -117,7 +118,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
+ if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
@@ -336,49 +337,39 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
- if (needs_frame.is_bound()) {
- __ b(&needs_frame);
- } else {
- __ bind(&needs_frame);
- Comment(";;; call deopt with frame");
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
- __ PushFixedFrame(ip);
- __ addi(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- __ bind(&call_deopt_entry);
- // Add the base address to the offset previously loaded in
- // entry_offset.
- __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
- __ add(ip, entry_offset, ip);
- __ Call(ip);
- }
+ Comment(";;; call deopt with frame");
+ __ PushFixedFrame();
+ __ b(&needs_frame, SetLK);
} else {
- // The last entry can fall through into `call_deopt_entry`, avoiding a
- // branch.
- bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
-
- if (need_branch) __ b(&call_deopt_entry);
+ __ b(&call_deopt_entry, SetLK);
}
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
}
- if (!call_deopt_entry.is_bound()) {
- Comment(";;; call deopt");
- __ bind(&call_deopt_entry);
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+ __ push(ip);
+ __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ }
- if (info()->saves_caller_doubles()) {
- DCHECK(info()->IsStub());
- RestoreCallerDoubles();
- }
+ Comment(";;; call deopt");
+ __ bind(&call_deopt_entry);
- // Add the base address to the offset previously loaded in entry_offset.
- __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
- __ add(ip, entry_offset, ip);
- __ Call(ip);
+ if (info()->saves_caller_doubles()) {
+ DCHECK(info()->IsStub());
+ RestoreCallerDoubles();
}
+
+ // Add the base address to the offset previously loaded in entry_offset.
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+ __ add(ip, entry_offset, ip);
+ __ Jump(ip);
}
// The deoptimization jump table is the last part of the instruction
@@ -812,14 +803,15 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
__ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
}
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
@@ -941,12 +933,6 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
-#if V8_OOL_CONSTANT_POOL
- if (kind & Safepoint::kWithRegisters) {
- // Register always contains a pointer to the constant pool.
- safepoint.DefinePointerRegister(kConstantPoolRegister, zone());
- }
-#endif
}
@@ -2787,10 +2773,11 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+ Register instance_type = ip;
+ __ GetMapConstructor(temp, temp, temp2, instance_type);
// Objects with a non-function constructor have class 'Object'.
- __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
+ __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
__ bne(is_true);
} else {
@@ -2898,7 +2885,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
Register map = temp;
__ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
{
- // Block constant pool emission to ensure the positions of instructions are
+ // Block trampoline emission to ensure the positions of instructions are
// as expected by the patcher. See InstanceofStub::Generate().
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
__ bind(deferred->map_check()); // Label for calculating code patching.
@@ -2906,10 +2893,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// root array to force relocation to be able to later patch with
// the cached map.
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ mov(ip, Operand(Handle<Object>(cell)));
- __ LoadP(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
+ __ mov(ip, Operand(cell));
+ __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(map, ip);
- __ bne(&cache_miss);
+ __ bc_short(ne, &cache_miss);
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch
// with true or false.
@@ -2957,22 +2944,25 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
LoadContextFromDeferred(instr->context());
__ Move(InstanceofStub::right(), instr->function());
- // Include instructions below in delta: mov + call = mov + (mov + 2)
- static const int kAdditionalDelta = 2 * Assembler::kMovInstructions + 2;
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
{
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- if (Assembler::kMovInstructions != 1 &&
- is_int16(delta * Instruction::kInstrSize)) {
- // The following mov will be an li rather than a multi-instruction form
- delta -= Assembler::kMovInstructions - 1;
- }
+ Handle<Code> code = stub.GetCode();
+ // Include instructions below in delta: bitwise_mov32 + call
+ int delta = (masm_->InstructionsGeneratedSince(map_check) + 2) *
+ Instruction::kInstrSize +
+ masm_->CallSize(code);
// r8 is used to communicate the offset to the location of the map check.
- __ mov(r8, Operand(delta * Instruction::kInstrSize));
+ if (is_int16(delta)) {
+ delta -= Instruction::kInstrSize;
+ __ li(r8, Operand(delta));
+ } else {
+ __ bitwise_mov32(r8, delta);
+ }
+ CallCodeGeneric(code, RelocInfo::CODE_TARGET, instr,
+ RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ DCHECK(delta / Instruction::kInstrSize ==
+ masm_->InstructionsGeneratedSince(map_check));
}
- CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
- RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
- DCHECK(delta == masm_->InstructionsGeneratedSince(map_check));
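// A standalone sketch, not V8 code, of the delta computation above with
// hypothetical sizes: the offset must count every instruction between the
// map check and the call, including the two-instruction bitwise_mov32 that
// materializes delta itself, and one instruction is saved when a single li
// suffices.
#include <cstdio>

int main() {
  const int kInstrSize = 4;
  int generated_since_check = 10;   // hypothetical
  int call_size = 2 * kInstrSize;   // hypothetical CallSize(code)
  int delta = (generated_since_check + 2) * kInstrSize + call_size;
  if (delta >= -32768 && delta <= 32767) {
    delta -= kInstrSize;  // li is one instruction shorter than the mov pair
  }
  std::printf("delta = %d bytes\n", delta);
}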
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Put the result value (r3) into the result register slot and
@@ -3052,18 +3042,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
- __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
- }
-}
-
-
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@@ -3093,36 +3071,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Register cell = scratch0();
-
- // Load the cell.
- __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We use a temp to check the payload (CompareRoot might clobber ip).
- Register payload = ToRegister(instr->temp());
- __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
- __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
- }
-
- // Store the value.
- __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0);
- // Cells are always rescanned, so no write barrier here.
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3235,7 +3189,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL,
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3590,7 +3546,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4529,7 +4487,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
+ Handle<Code> ic =
+ StoreIC::initialize_stub(isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4793,8 +4753,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -5508,7 +5469,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
if (isolate()->heap()->InNewSpace(*object)) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
- __ mov(ip, Operand(Handle<Object>(cell)));
+ __ mov(ip, Operand(cell));
__ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
__ cmp(reg, ip);
} else {
@@ -5519,6 +5480,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+ Register temp = ToRegister(instr->temp());
{
PushSafepointRegistersScope scope(this);
__ push(object);
@@ -5526,9 +5488,9 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(instr->pointer_map(), 1,
Safepoint::kNoLazyDeopt);
- __ StoreToSafepointRegisterSlot(r3, scratch0());
+ __ StoreToSafepointRegisterSlot(r3, temp);
}
- __ TestIfSmi(scratch0(), r0);
+ __ TestIfSmi(temp, r0);
DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
}
@@ -5560,17 +5522,14 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
return;
}
- Register map_reg = scratch0();
-
- LOperand* input = instr->value();
- DCHECK(input->IsRegister());
- Register reg = ToRegister(input);
+ Register object = ToRegister(instr->value());
+ Register map_reg = ToRegister(instr->temp());
- __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->HasMigrationTarget()) {
- deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
+ deferred = new (zone()) DeferredCheckMaps(this, instr, object);
__ bind(deferred->check_maps());
}
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc
index d54c7ec46a..ec75713480 100644
--- a/deps/v8/src/ppc/lithium-ppc.cc
+++ b/deps/v8/src/ppc/lithium-ppc.cc
@@ -2029,7 +2029,9 @@ LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
LOperand* value = UseRegisterAtStart(instr->value());
- LInstruction* result = AssignEnvironment(new (zone()) LCheckMaps(value));
+ LOperand* temp = TempRegister();
+ LInstruction* result =
+ AssignEnvironment(new (zone()) LCheckMaps(value, temp));
if (instr->HasMigrationTarget()) {
info()->MarkAsDeferredCalling();
result = AssignPointerMap(result);
@@ -2096,14 +2098,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new (zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object =
@@ -2118,17 +2112,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to check the value in the cell in the case where we perform
- // a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new (zone())
- LStoreGlobalCell(value, TempRegister()))
- : new (zone()) LStoreGlobalCell(value, NULL);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h
index ac7b505b98..5dce71cbf5 100644
--- a/deps/v8/src/ppc/lithium-ppc.h
+++ b/deps/v8/src/ppc/lithium-ppc.h
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@@ -142,7 +141,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1641,13 +1639,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@@ -1669,21 +1660,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
- LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
@@ -2319,11 +2295,15 @@ class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
};
-class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 1> {
public:
- explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; }
+ explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 2c9f7aa7a9..2f56d39c92 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -104,15 +104,14 @@ void MacroAssembler::CallJSEntry(Register target) {
int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
Condition cond) {
- Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
- return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize;
+ return (2 + kMovInstructions) * kInstrSize;
}
int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
RelocInfo::Mode rmode,
Condition cond) {
- return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
+ return (2 + kMovInstructions) * kInstrSize;
}
@@ -274,6 +273,7 @@ void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
Condition cond) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
DCHECK(cond == al);
StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
}
@@ -514,40 +514,28 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
void MacroAssembler::PushFixedFrame(Register marker_reg) {
mflr(r0);
-#if V8_OOL_CONSTANT_POOL
- if (marker_reg.is_valid()) {
- Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
- } else {
- Push(r0, fp, kConstantPoolRegister, cp);
- }
-#else
if (marker_reg.is_valid()) {
Push(r0, fp, cp, marker_reg);
} else {
Push(r0, fp, cp);
}
-#endif
}
void MacroAssembler::PopFixedFrame(Register marker_reg) {
-#if V8_OOL_CONSTANT_POOL
- if (marker_reg.is_valid()) {
- Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
- } else {
- Pop(r0, fp, kConstantPoolRegister, cp);
- }
-#else
if (marker_reg.is_valid()) {
Pop(r0, fp, cp, marker_reg);
} else {
Pop(r0, fp, cp);
}
-#endif
mtlr(r0);
}
+const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable;
+const int MacroAssembler::kNumSafepointSavedRegisters =
+ Register::kMaxNumAllocatableRegisters;
+
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -664,41 +652,11 @@ void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
}
-#if V8_OOL_CONSTANT_POOL
-void MacroAssembler::LoadConstantPoolPointerRegister(
- CodeObjectAccessMethod access_method, int ip_code_entry_delta) {
- Register base;
- int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize;
- if (access_method == CAN_USE_IP) {
- base = ip;
- constant_pool_offset += ip_code_entry_delta;
- } else {
- DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE);
- base = kConstantPoolRegister;
- ConstantPoolUnavailableScope constant_pool_unavailable(this);
-
- // CheckBuffer() is called too frequently. This will pre-grow
- // the buffer if needed to avoid spliting the relocation and instructions
- EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
-
- uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
- mov(base, Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
- }
- LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset));
-}
-#endif
-
-
void MacroAssembler::StubPrologue(int prologue_offset) {
LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
PushFixedFrame(r11);
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-#if V8_OOL_CONSTANT_POOL
- // ip contains prologue address
- LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
- set_ool_constant_pool_available(true);
-#endif
}
@@ -731,28 +689,13 @@ void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
}
}
}
-#if V8_OOL_CONSTANT_POOL
- // ip contains prologue address
- LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
- set_ool_constant_pool_available(true);
-#endif
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
- if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
- PushFixedFrame();
-#if V8_OOL_CONSTANT_POOL
- // This path should not rely on ip containing code entry.
- LoadConstantPoolPointerRegister(CONSTRUCT_INTERNAL_REFERENCE);
-#endif
- LoadSmiLiteral(ip, Smi::FromInt(type));
- push(ip);
- } else {
- LoadSmiLiteral(ip, Smi::FromInt(type));
- PushFixedFrame(ip);
- }
+ LoadSmiLiteral(ip, Smi::FromInt(type));
+ PushFixedFrame(ip);
// Adjust FP to point to saved FP.
addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
@@ -762,24 +705,15 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
-#if V8_OOL_CONSTANT_POOL
- ConstantPoolUnavailableScope constant_pool_unavailable(this);
-#endif
// r3: preserved
// r4: preserved
// r5: preserved
// Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer, return address and constant pool pointer.
+ // the caller's state.
int frame_ends;
LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
-#if V8_OOL_CONSTANT_POOL
- const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
- const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
- const int offset = ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
- LoadP(kConstantPoolRegister, MemOperand(fp, offset));
-#endif
mtlr(r0);
frame_ends = pc_offset();
Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
@@ -826,10 +760,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
li(r8, Operand::Zero());
StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
-#if V8_OOL_CONSTANT_POOL
- StoreP(kConstantPoolRegister,
- MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
-#endif
mov(r8, Operand(CodeObject()));
StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -899,9 +829,6 @@ int MacroAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length) {
-#if V8_OOL_CONSTANT_POOL
- ConstantPoolUnavailableScope constant_pool_unavailable(this);
-#endif
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1159,165 +1086,32 @@ void MacroAssembler::DebugBreak() {
}
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
+void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // For the JSEntry handler, we must preserve r1-r7, r0,r8-r15 are available.
- // We want the stack to look like
- // sp -> NextOffset
- // CodeObject
- // state
- // context
- // frame pointer
// Link the current handler as the next handler.
+ // Preserve r3-r7.
mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
LoadP(r0, MemOperand(r8));
- StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize));
+ push(r0);
+
// Set this new handler as the current one.
StoreP(sp, MemOperand(r8));
-
- if (kind == StackHandler::JS_ENTRY) {
- li(r8, Operand::Zero()); // NULL frame pointer.
- StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
- LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context.
- StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
- } else {
- // still not sure if fp is right
- StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
- StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- }
- unsigned state = StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- LoadIntLiteral(r8, state);
- StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
- mov(r8, Operand(CodeObject()));
- StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
}
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+
pop(r4);
mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
StoreP(r4, MemOperand(ip));
}
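// A standalone sketch, not V8 code, of the simplified handler chain that
// Push/PopStackHandler now maintain: each handler is a single word (the
// "next" link) threaded through a per-isolate head pointer, modeled here
// with a global.
#include <cstdio>

struct StackHandler {
  StackHandler* next;  // the only field left after this patch
};

StackHandler* g_handler_head = nullptr;  // stands in for kHandlerAddress

void PushStackHandler(StackHandler* h) {
  h->next = g_handler_head;  // link the current handler as the next one
  g_handler_head = h;        // set the new handler as the current one
}

void PopStackHandler() {
  g_handler_head = g_handler_head->next;  // unlink the top handler
}

int main() {
  StackHandler outer, inner;
  PushStackHandler(&outer);
  PushStackHandler(&inner);
  PopStackHandler();
  std::printf("head is outer: %d\n", g_handler_head == &outer);
}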
-// PPC - make use of ip as a temporary register
-void MacroAssembler::JumpToHandlerEntry() {
-// Compute the handler entry address and jump to it. The handler table is
-// a fixed array of (smi-tagged) code offsets.
-// r3 = exception, r4 = code object, r5 = state.
-#if V8_OOL_CONSTANT_POOL
- ConstantPoolUnavailableScope constant_pool_unavailable(this);
- LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset));
-#endif
- LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table.
- addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index.
- slwi(ip, r5, Operand(kPointerSizeLog2));
- add(ip, r6, ip);
- LoadP(r5, MemOperand(ip)); // Smi-tagged offset.
- addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
- SmiUntag(ip, r5);
- add(r0, r4, ip);
- mtctr(r0);
- bctr();
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
- Label skip;
-
- // The exception is expected in r3.
- if (!value.is(r3)) {
- mr(r3, value);
- }
- // Drop the stack pointer to the top of the top handler.
- mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- LoadP(sp, MemOperand(r6));
- // Restore the next handler.
- pop(r5);
- StoreP(r5, MemOperand(r6));
-
- // Get the code object (r4) and state (r5). Restore the context and frame
- // pointer.
- pop(r4);
- pop(r5);
- pop(cp);
- pop(fp);
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
- // or cp.
- cmpi(cp, Operand::Zero());
- beq(&skip);
- StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- bind(&skip);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in r3.
- if (!value.is(r3)) {
- mr(r3, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- LoadP(sp, MemOperand(r6));
-
- // Unwind the handlers until the ENTRY handler is found.
- Label fetch_next, check_kind;
- b(&check_kind);
- bind(&fetch_next);
- LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
- andi(r0, r5, Operand(StackHandler::KindField::kMask));
- bne(&fetch_next, cr0);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(r5);
- StoreP(r5, MemOperand(r6));
- // Get the code object (r4) and state (r5). Clear the context and frame
- // pointer (0 was saved in the handler).
- pop(r4);
- pop(r5);
- pop(cp);
- pop(fp);
-
- JumpToHandlerEntry();
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch, Label* miss) {
Label same_contexts;
@@ -2107,6 +1901,20 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
}
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp, Register temp2) {
+ Label done, loop;
+ LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
+ bind(&loop);
+ JumpIfSmi(result, &done);
+ CompareObjectType(result, temp, temp2, MAP_TYPE);
+ bne(&done);
+ LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
+ b(&loop);
+ bind(&done);
+}
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss,
bool miss_on_bound_function) {
@@ -2163,7 +1971,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- LoadP(result, FieldMemOperand(result, Map::kConstructorOffset));
+ GetMapConstructor(result, result, scratch, ip);
}
// All done.
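// A standalone sketch, not V8 code, of the loop GetMapConstructor adds: the
// constructor-or-back-pointer field may point at another Map, so it is
// followed until something that is not a map appears (the constructor, or
// in V8 proper possibly a smi). The Obj type here is a stand-in.
#include <cstdio>

struct Obj {
  bool is_map;
  Obj* constructor_or_back_pointer;  // only meaningful when is_map
};

Obj* GetMapConstructor(Obj* map) {
  Obj* result = map->constructor_or_back_pointer;
  while (result != nullptr && result->is_map) {
    result = result->constructor_or_back_pointer;  // follow the back pointer
  }
  return result;
}

int main() {
  Obj ctor = {false, nullptr};
  Obj base_map = {true, &ctor};
  Obj derived_map = {true, &base_map};
  std::printf("found ctor: %d\n", GetMapConstructor(&derived_map) == &ctor);
}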
@@ -3370,25 +3178,6 @@ void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
Register new_value) {
lwz(scratch, MemOperand(location));
-#if V8_OOL_CONSTANT_POOL
- if (emit_debug_code()) {
-// Check that the instruction sequence is a load from the constant pool
-#if V8_TARGET_ARCH_PPC64
- And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
- Cmpi(scratch, Operand(ADDI), r0);
- Check(eq, kTheInstructionShouldBeALi);
- lwz(scratch, MemOperand(location, kInstrSize));
-#endif
- ExtractBitMask(scratch, scratch, 0x1f * B16);
- cmpi(scratch, Operand(kConstantPoolRegister.code()));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
- // Scratch was clobbered. Restore it.
- lwz(scratch, MemOperand(location));
- }
- // Get the address of the constant and patch it.
- andi(scratch, scratch, Operand(kImm16Mask));
- StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
-#else
// This code assumes a FIXED_SEQUENCE for lis/ori
// At this point scratch is a lis instruction.
@@ -3465,7 +3254,6 @@ void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
#else
FlushICache(location, 2 * kInstrSize, scratch);
#endif
-#endif
}
@@ -3473,24 +3261,6 @@ void MacroAssembler::GetRelocatedValue(Register location, Register result,
Register scratch) {
lwz(result, MemOperand(location));
-#if V8_OOL_CONSTANT_POOL
- if (emit_debug_code()) {
-// Check that the instruction sequence is a load from the constant pool
-#if V8_TARGET_ARCH_PPC64
- And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
- Cmpi(result, Operand(ADDI), r0);
- Check(eq, kTheInstructionShouldBeALi);
- lwz(result, MemOperand(location, kInstrSize));
-#endif
- ExtractBitMask(result, result, 0x1f * B16);
- cmpi(result, Operand(kConstantPoolRegister.code()));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
- lwz(result, MemOperand(location));
- }
- // Get the address of the constant and retrieve it.
- andi(result, result, Operand(kImm16Mask));
- LoadPX(result, MemOperand(kConstantPoolRegister, result));
-#else
// This code assumes a FIXED_SEQUENCE for lis/ori
if (emit_debug_code()) {
And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
@@ -3543,7 +3313,6 @@ void MacroAssembler::GetRelocatedValue(Register location, Register result,
sldi(result, result, Operand(16));
rldimi(result, scratch, 0, 48);
#endif
-#endif
}
@@ -3929,23 +3698,6 @@ void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
Register scratch) {
-#if V8_OOL_CONSTANT_POOL
- // TODO(mbrandy): enable extended constant pool usage for doubles.
- // See ARM commit e27ab337 for a reference.
- if (is_ool_constant_pool_available() && !is_constant_pool_full()) {
- RelocInfo rinfo(pc_, value);
- ConstantPoolAddEntry(rinfo);
-#if V8_TARGET_ARCH_PPC64
- // We use 2 instruction sequence here for consistency with mov.
- li(scratch, Operand::Zero());
- lfdx(result, MemOperand(kConstantPoolRegister, scratch));
-#else
- lfd(result, MemOperand(kConstantPoolRegister, 0));
-#endif
- return;
- }
-#endif
-
// avoid gcc strict aliasing error using union cast
union {
double dval;
@@ -4081,6 +3833,46 @@ void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
#endif
+void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
+ Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mffprd(scratch, dst);
+ rldimi(scratch, src, 0, 32);
+ mtfprd(dst, scratch);
+ return;
+ }
+#endif
+
+ subi(sp, sp, Operand(kDoubleSize));
+ stfd(dst, MemOperand(sp));
+ stw(src, MemOperand(sp, Register::kMantissaOffset));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfd(dst, MemOperand(sp));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
+ Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+ if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+ mffprd(scratch, dst);
+ rldimi(scratch, src, 32, 0);
+ mtfprd(dst, scratch);
+ return;
+ }
+#endif
+
+ subi(sp, sp, Operand(kDoubleSize));
+ stfd(dst, MemOperand(sp));
+ stw(src, MemOperand(sp, Register::kExponentOffset));
+ nop(GROUP_ENDING_NOP); // LHS/RAW optimization
+ lfd(dst, MemOperand(sp));
+ addi(sp, sp, Operand(kDoubleSize));
+}
+
+
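// A standalone sketch, not V8 code, of what InsertDoubleLow computes:
// replace the low 32 bits of a double's bit pattern with a GPR value while
// leaving the high word untouched. The memcpy round-trip stands in for the
// stfd/stw/lfd stack sequence (or the rldimi path on FPR_GPR_MOV hardware).
#include <cstdint>
#include <cstdio>
#include <cstring>

double InsertDoubleLow(double dst, uint32_t src) {
  uint64_t bits;
  std::memcpy(&bits, &dst, sizeof(bits));
  bits = (bits & 0xFFFFFFFF00000000ull) | src;  // splice in the low word
  std::memcpy(&dst, &bits, sizeof(dst));
  return dst;
}

int main() {
  double d = InsertDoubleLow(1.0, 0xDEADBEEFu);
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  std::printf("%016llx\n", static_cast<unsigned long long>(bits));
}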
void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
#if V8_TARGET_ARCH_PPC64
if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
index 146489d131..04e9bd85bd 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/ppc/macro-assembler-ppc.h
@@ -102,9 +102,7 @@ class MacroAssembler : public Assembler {
MacroAssembler(Isolate* isolate, void* buffer, int size);
- // Returns the size of a call in instructions. Note, the value returned is
- // only valid as long as no entries are added to the constant pool between
- // checking the call size and emitting the actual call.
+ // Returns the size of a call in instructions.
static int CallSize(Register target);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
static int CallSizeNotPredictableCodeSize(Address target,
@@ -379,8 +377,9 @@ class MacroAssembler : public Assembler {
void Prologue(bool code_pre_aging, int prologue_offset = 0);
// Enter exit frame.
- // stack_space - extra stack space, used for alignment before call to C.
- void EnterExitFrame(bool save_doubles, int stack_space = 0);
+ // stack_space - extra stack space, used for parameters before call to C.
+ // At least one slot (for the return address) should be provided.
+ void EnterExitFrame(bool save_doubles, int stack_space = 1);
// Leave the current exit frame. Expects the return value in r0.
// Expect the number of values, pushed prior to the exit frame, to
@@ -464,6 +463,8 @@ class MacroAssembler : public Assembler {
void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
Register src_lo, Register scratch);
#endif
+ void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
+ void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
void MovDoubleLowToInt(Register dst, DoubleRegister src);
void MovDoubleHighToInt(Register dst, DoubleRegister src);
void MovDoubleToInt64(
@@ -543,19 +544,12 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link it into the stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
+ // Unlink the stack handler on top of the stack from the stack handler chain.
// Must preserve the result register.
- void PopTryHandler();
-
- // Passes thrown value to the handler of top of the try handler chain.
- void Throw(Register value);
-
- // Propagates an uncatchable exception to the top of the current JS stack's
- // handler chain.
- void ThrowUncatchable(Register value);
+ void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -684,6 +678,11 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Support functions.
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done, and |temp2| its instance type.
+ void GetMapConstructor(Register result, Register map, Register temp,
+ Register temp2);
+
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
@@ -1361,7 +1360,7 @@ class MacroAssembler : public Assembler {
// ---------------------------------------------------------------------------
// Patching helpers.
- // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
+ // Retrieve/patch the relocated value (lis/ori pair).
void GetRelocatedValue(Register location, Register result, Register scratch);
void SetRelocatedValue(Register location, Register scratch,
Register new_value);
@@ -1481,22 +1480,14 @@ class MacroAssembler : public Assembler {
inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
Register mask_reg);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
+ static const RegList kSafepointSavedRegisters;
+ static const int kNumSafepointSavedRegisters;
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
-#if V8_OOL_CONSTANT_POOL
- // Loads the constant pool pointer (kConstantPoolRegister).
- enum CodeObjectAccessMethod { CAN_USE_IP, CONSTRUCT_INTERNAL_REFERENCE };
- void LoadConstantPoolPointerRegister(CodeObjectAccessMethod access_method,
- int ip_code_entry_delta = 0);
-#endif
-
bool generating_stub_;
bool has_frame_;
// This handle will be patched with the code object on installation.
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index 0bb2da05ff..9b29004f3d 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -11,6 +11,7 @@
#if V8_TARGET_ARCH_PPC
#include "src/assembler.h"
+#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/ppc/constants-ppc.h"
@@ -2998,8 +2999,7 @@ void Simulator::ExecuteExt5(Instruction* instr) {
int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
- // rotate left
- uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
uintptr_t mask = 0xffffffffffffffff >> mb;
result &= mask;
set_register(ra, result);
@@ -3016,8 +3016,7 @@ void Simulator::ExecuteExt5(Instruction* instr) {
int me = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
DCHECK(sh >= 0 && sh <= 63);
DCHECK(me >= 0 && me <= 63);
- // rotate left
- uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
uintptr_t mask = 0xffffffffffffffff << (63 - me);
result &= mask;
set_register(ra, result);
@@ -3034,8 +3033,7 @@ void Simulator::ExecuteExt5(Instruction* instr) {
int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
- // rotate left
- uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
uintptr_t mask = (0xffffffffffffffff >> mb) & (0xffffffffffffffff << sh);
result &= mask;
set_register(ra, result);
@@ -3052,8 +3050,7 @@ void Simulator::ExecuteExt5(Instruction* instr) {
int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
int me = 63 - sh;
- // rotate left
- uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
uintptr_t mask = 0;
if (mb < me + 1) {
uintptr_t bit = 0x8000000000000000 >> mb;
@@ -3092,8 +3089,7 @@ void Simulator::ExecuteExt5(Instruction* instr) {
int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
DCHECK(sh >= 0 && sh <= 63);
DCHECK(mb >= 0 && mb <= 63);
- // rotate left
- uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+ uintptr_t result = base::bits::RotateLeft64(rs_val, sh);
uintptr_t mask = 0xffffffffffffffff >> mb;
result &= mask;
set_register(ra, result);
@@ -3268,8 +3264,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int sh = instr->Bits(15, 11);
int mb = instr->Bits(10, 6);
int me = instr->Bits(5, 1);
- // rotate left
- uint32_t result = (rs_val << sh) | (rs_val >> (32 - sh));
+ uint32_t result = base::bits::RotateLeft32(rs_val, sh);
int mask = 0;
if (mb < me + 1) {
int bit = 0x80000000 >> mb;
@@ -3311,8 +3306,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
int mb = instr->Bits(10, 6);
int me = instr->Bits(5, 1);
- // rotate left
- uint32_t result = (rs_val << sh) | (rs_val >> (32 - sh));
+ uint32_t result = base::bits::RotateLeft32(rs_val, sh);
int mask = 0;
if (mb < me + 1) {
int bit = 0x80000000 >> mb;
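All five open-coded rotations above funnel into base::bits::RotateLeft64 (and RotateLeft32 for the 32-bit forms). As a rough sketch of why such a helper is preferable, assuming the usual masked-shift formulation rather than the actual base/bits implementation: the replaced pattern `(rs_val << sh) | (rs_val >> (64 - sh))` is undefined behavior when sh == 0, because shifting a 64-bit value by 64 bits is undefined in C++.

#include <cstdint>

// Minimal UB-free rotate-left sketch; masking the complementary shift
// count makes shift == 0 degenerate to (value | value) instead of a
// 64-bit shift by 64, which C++ leaves undefined.
inline uint64_t RotateLeft64(uint64_t value, unsigned shift) {
  return (value << (shift & 63)) | (value >> ((64 - shift) & 63));
}

inline uint32_t RotateLeft32(uint32_t value, unsigned shift) {
  return (value << (shift & 31)) | (value >> ((32 - shift) & 31));
}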
diff --git a/deps/v8/src/preparse-data-format.h b/deps/v8/src/preparse-data-format.h
index 391a351071..de106939c7 100644
--- a/deps/v8/src/preparse-data-format.h
+++ b/deps/v8/src/preparse-data-format.h
@@ -27,7 +27,7 @@ struct PreparseDataConstants {
static const int kMessageStartPos = 0;
static const int kMessageEndPos = 1;
static const int kMessageArgCountPos = 2;
- static const int kIsReferenceErrorPos = 3;
+ static const int kParseErrorTypePos = 3;
static const int kMessageTextPos = 4;
static const unsigned char kNumberTerminator = 0x80u;
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index a66a1adcf9..e1c7ad199b 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -3,9 +3,9 @@
// found in the LICENSE file.
#include "src/base/logging.h"
-#include "src/compiler.h"
#include "src/globals.h"
#include "src/hashmap.h"
+#include "src/parser.h"
#include "src/preparse-data.h"
#include "src/preparse-data-format.h"
@@ -28,11 +28,10 @@ CompleteParserRecorder::CompleteParserRecorder() {
}
-void CompleteParserRecorder::LogMessage(int start_pos,
- int end_pos,
+void CompleteParserRecorder::LogMessage(int start_pos, int end_pos,
const char* message,
const char* arg_opt,
- bool is_reference_error) {
+ ParseErrorType error_type) {
if (HasError()) return;
preamble_[PreparseDataConstants::kHasErrorOffset] = true;
function_store_.Reset();
@@ -42,8 +41,8 @@ void CompleteParserRecorder::LogMessage(int start_pos,
function_store_.Add(end_pos);
STATIC_ASSERT(PreparseDataConstants::kMessageArgCountPos == 2);
function_store_.Add((arg_opt == NULL) ? 0 : 1);
- STATIC_ASSERT(PreparseDataConstants::kIsReferenceErrorPos == 3);
- function_store_.Add(is_reference_error ? 1 : 0);
+ STATIC_ASSERT(PreparseDataConstants::kParseErrorTypePos == 3);
+ function_store_.Add(error_type);
STATIC_ASSERT(PreparseDataConstants::kMessageTextPos == 4);
WriteString(CStrVector(message));
if (arg_opt != NULL) WriteString(CStrVector(arg_opt));
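The boolean is_reference_error slot in the preparse data becomes a small integer error kind. Judging only from the call sites elsewhere in this patch (kSyntaxError as the default, kReferenceError at reference-error sites), ParseErrorType is an enum along these lines; this sketch is inferred from usage, not the definitive declaration:

// Inferred sketch of ParseErrorType: only the two values visible in this
// patch are listed, and their numeric order is an assumption. The recorder
// stores the raw enum value where a 0/1 boolean used to go, so the stream
// layout (one small integer per error) is unchanged.
enum ParseErrorType {
  kSyntaxError = 0,
  kReferenceError = 1
};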
diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h
index 0d784991c2..d78c3ed8dc 100644
--- a/deps/v8/src/preparse-data.h
+++ b/deps/v8/src/preparse-data.h
@@ -13,8 +13,37 @@
namespace v8 {
namespace internal {
-class ScriptData;
+class ScriptData {
+ public:
+ ScriptData(const byte* data, int length);
+ ~ScriptData() {
+ if (owns_data_) DeleteArray(data_);
+ }
+
+ const byte* data() const { return data_; }
+ int length() const { return length_; }
+ bool rejected() const { return rejected_; }
+
+ void Reject() { rejected_ = true; }
+
+ void AcquireDataOwnership() {
+ DCHECK(!owns_data_);
+ owns_data_ = true;
+ }
+
+ void ReleaseDataOwnership() {
+ DCHECK(owns_data_);
+ owns_data_ = false;
+ }
+ private:
+ bool owns_data_ : 1;
+ bool rejected_ : 1;
+ const byte* data_;
+ int length_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScriptData);
+};
// Abstract interface for preparse data recorder.
class ParserRecorder {
@@ -30,11 +59,10 @@ class ParserRecorder {
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
// representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
+ virtual void LogMessage(int start, int end, const char* message,
const char* argument_opt,
- bool is_reference_error) = 0;
+ ParseErrorType error_type) = 0;
+
private:
DISALLOW_COPY_AND_ASSIGN(ParserRecorder);
};
@@ -43,7 +71,7 @@ class ParserRecorder {
class SingletonLogger : public ParserRecorder {
public:
SingletonLogger()
- : has_error_(false), start_(-1), end_(-1), is_reference_error_(false) {}
+ : has_error_(false), start_(-1), end_(-1), error_type_(kSyntaxError) {}
virtual ~SingletonLogger() {}
void Reset() { has_error_ = false; }
@@ -63,18 +91,15 @@ class SingletonLogger : public ParserRecorder {
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
// representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt,
- bool is_reference_error) {
+ virtual void LogMessage(int start, int end, const char* message,
+ const char* argument_opt, ParseErrorType error_type) {
if (has_error_) return;
has_error_ = true;
start_ = start;
end_ = end;
message_ = message;
argument_opt_ = argument_opt;
- is_reference_error_ = is_reference_error;
+ error_type_ = error_type;
}
bool has_error() const { return has_error_; }
@@ -97,7 +122,10 @@ class SingletonLogger : public ParserRecorder {
DCHECK(!has_error_);
return scope_uses_super_property_;
}
- int is_reference_error() const { return is_reference_error_; }
+ ParseErrorType error_type() const {
+ DCHECK(has_error_);
+ return error_type_;
+ }
const char* message() {
DCHECK(has_error_);
return message_;
@@ -119,7 +147,7 @@ class SingletonLogger : public ParserRecorder {
// For error messages.
const char* message_;
const char* argument_opt_;
- bool is_reference_error_;
+ ParseErrorType error_type_;
};
@@ -147,11 +175,8 @@ class CompleteParserRecorder : public ParserRecorder {
// Logs an error message and marks the log as containing an error.
// Further logging will be ignored, and ExtractData will return a vector
// representing the error only.
- virtual void LogMessage(int start,
- int end,
- const char* message,
- const char* argument_opt,
- bool is_reference_error_);
+ virtual void LogMessage(int start, int end, const char* message,
+ const char* argument_opt, ParseErrorType error_type);
ScriptData* GetScriptData();
bool HasError() {
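ScriptData grows from a forward declaration into a full class whose destructor frees the buffer only after ownership has been acquired. A standalone analogue of that ownership-flag pattern (the OwnedBuffer name is hypothetical; the class above is simplified accordingly):

#include <cstdint>

// The wrapper aliases externally owned data by default; only an explicit
// AcquireDataOwnership() makes the destructor responsible for freeing it,
// which lets cached preparse data change hands without double frees.
class OwnedBuffer {
 public:
  OwnedBuffer(const uint8_t* data, int length)
      : owns_data_(false), data_(data), length_(length) {}
  ~OwnedBuffer() {
    if (owns_data_) delete[] data_;
  }

  void AcquireDataOwnership() { owns_data_ = true; }
  void ReleaseDataOwnership() { owns_data_ = false; }

 private:
  bool owns_data_;
  const uint8_t* data_;
  int length_;
};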
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 154a9ae527..5a6a094f6a 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -21,24 +21,16 @@ namespace v8 {
namespace internal {
void PreParserTraits::ReportMessageAt(Scanner::Location location,
- const char* message,
- const char* arg,
- bool is_reference_error) {
- ReportMessageAt(location.beg_pos,
- location.end_pos,
- message,
- arg,
- is_reference_error);
+ const char* message, const char* arg,
+ ParseErrorType error_type) {
+ ReportMessageAt(location.beg_pos, location.end_pos, message, arg, error_type);
}
-void PreParserTraits::ReportMessageAt(int start_pos,
- int end_pos,
- const char* message,
- const char* arg,
- bool is_reference_error) {
- pre_parser_->log_->LogMessage(start_pos, end_pos, message, arg,
- is_reference_error);
+void PreParserTraits::ReportMessageAt(int start_pos, int end_pos,
+ const char* message, const char* arg,
+ ParseErrorType error_type) {
+ pre_parser_->log_->LogMessage(start_pos, end_pos, message, arg, error_type);
}
@@ -182,7 +174,6 @@ PreParser::Statement PreParser::ParseStatementListItem(bool* ok) {
case Token::CONST:
return ParseVariableStatement(kStatementListItem, ok);
case Token::LET:
- DCHECK(allow_harmony_scoping());
if (is_strict(language_mode())) {
return ParseVariableStatement(kStatementListItem, ok);
}
@@ -202,8 +193,19 @@ void PreParser::ParseStatementList(int end_token, bool* ok) {
if (directive_prologue && peek() != Token::STRING) {
directive_prologue = false;
}
+ Token::Value token = peek();
+ Scanner::Location old_super_loc = function_state_->super_call_location();
Statement statement = ParseStatementListItem(ok);
if (!*ok) return;
+ Scanner::Location super_loc = function_state_->super_call_location();
+ if (is_strong(language_mode()) &&
+ i::IsConstructor(function_state_->kind()) &&
+ !old_super_loc.IsValid() && super_loc.IsValid() &&
+ token != Token::SUPER) {
+ ReportMessageAt(super_loc, "strong_super_call_nested");
+ *ok = false;
+ return;
+ }
if (directive_prologue) {
if (statement.IsUseStrictLiteral()) {
scope_->SetLanguageMode(
@@ -391,7 +393,7 @@ PreParser::Statement PreParser::ParseBlock(bool* ok) {
//
Expect(Token::LBRACE, CHECK_OK);
while (peek() != Token::RBRACE) {
- if (allow_harmony_scoping() && is_strict(language_mode())) {
+ if (is_strict(language_mode())) {
ParseStatementListItem(CHECK_OK);
} else {
ParseStatement(CHECK_OK);
@@ -464,12 +466,6 @@ PreParser::Statement PreParser::ParseVariableDeclarations(
Consume(Token::CONST);
if (is_strict(language_mode())) {
DCHECK(var_context != kStatement);
- if (!allow_harmony_scoping()) {
- Scanner::Location location = scanner()->peek_location();
- ReportMessageAt(location, "strict_const");
- *ok = false;
- return Statement::Default();
- }
is_strict_const = true;
require_initializer = var_context != kForStatement;
}
@@ -617,6 +613,7 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
// reporting any errors on it, because of the way errors are
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
+ function_state_->set_return_location(scanner()->location());
// An ECMAScript program is considered syntactically incorrect if it
// contains a return statement that is not within the body of a
@@ -628,6 +625,14 @@ PreParser::Statement PreParser::ParseReturnStatement(bool* ok) {
tok != Token::SEMICOLON &&
tok != Token::RBRACE &&
tok != Token::EOS) {
+ if (is_strong(language_mode()) &&
+ i::IsConstructor(function_state_->kind())) {
+ int pos = peek_position();
+ ReportMessageAt(Scanner::Location(pos, pos + 1),
+ "strong_constructor_return_value");
+ *ok = false;
+ return Statement::Default();
+ }
ParseExpression(true, CHECK_OK);
}
ExpectSemicolon(CHECK_OK);
@@ -678,7 +683,7 @@ PreParser::Statement PreParser::ParseSwitchStatement(bool* ok) {
while (token != Token::CASE &&
token != Token::DEFAULT &&
token != Token::RBRACE) {
- ParseStatement(CHECK_OK);
+ ParseStatementListItem(CHECK_OK);
token = peek();
}
}
@@ -953,6 +958,15 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
}
+ if (is_strong(language_mode()) && IsSubclassConstructor(kind)) {
+ if (!function_state.super_call_location().IsValid()) {
+ ReportMessageAt(function_name_location, "strong_super_call_missing",
+ kReferenceError);
+ *ok = false;
+ return Expression::Default();
+ }
+ }
+
return Expression::Default();
}
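The strong-mode check added to ParseStatementList works by snapshotting the recorded super() location around each statement: if the location turned valid while parsing a statement whose first token was not SUPER, the call must have been nested inside some other construct. The predicate reduces to a sketch like this:

// Nested-super detection as used above: "became valid during this
// statement" plus "statement did not itself start with 'super'" implies
// the super() call sat inside a nested construct, which strong mode bans.
bool SuperCallWasNested(bool valid_before, bool valid_after,
                        bool statement_began_with_super) {
  return !valid_before && valid_after && !statement_began_with_super;
}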
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index f7f532372b..08963c9425 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -100,7 +100,6 @@ class ParserBase : public Traits {
return allow_harmony_arrow_functions_;
}
bool allow_harmony_modules() const { return scanner()->HarmonyModules(); }
- bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); }
bool allow_harmony_numeric_literals() const {
return scanner()->HarmonyNumericLiterals();
}
@@ -108,7 +107,6 @@ class ParserBase : public Traits {
bool allow_harmony_object_literals() const {
return allow_harmony_object_literals_;
}
- bool allow_harmony_templates() const { return scanner()->HarmonyTemplates(); }
bool allow_harmony_sloppy() const { return allow_harmony_sloppy_; }
bool allow_harmony_unicode() const { return scanner()->HarmonyUnicode(); }
bool allow_harmony_computed_property_names() const {
@@ -130,9 +128,6 @@ class ParserBase : public Traits {
void set_allow_harmony_modules(bool allow) {
scanner()->SetHarmonyModules(allow);
}
- void set_allow_harmony_scoping(bool allow) {
- scanner()->SetHarmonyScoping(allow);
- }
void set_allow_harmony_numeric_literals(bool allow) {
scanner()->SetHarmonyNumericLiterals(allow);
}
@@ -142,9 +137,6 @@ class ParserBase : public Traits {
void set_allow_harmony_object_literals(bool allow) {
allow_harmony_object_literals_ = allow;
}
- void set_allow_harmony_templates(bool allow) {
- scanner()->SetHarmonyTemplates(allow);
- }
void set_allow_harmony_sloppy(bool allow) {
allow_harmony_sloppy_ = allow;
}
@@ -212,7 +204,7 @@ class ParserBase : public Traits {
return next_materialized_literal_index_++;
}
int materialized_literal_count() {
- return next_materialized_literal_index_ - JSFunction::kLiteralsPrefixSize;
+ return next_materialized_literal_index_;
}
int NextHandlerIndex() { return next_handler_index_++; }
@@ -221,6 +213,17 @@ class ParserBase : public Traits {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
+ Scanner::Location return_location() const { return return_location_; }
+ Scanner::Location super_call_location() const {
+ return super_call_location_;
+ }
+ void set_return_location(Scanner::Location location) {
+ return_location_ = location;
+ }
+ void set_super_call_location(Scanner::Location location) {
+ super_call_location_ = location;
+ }
+
bool is_generator() const { return IsGeneratorFunction(kind_); }
FunctionKind kind() const { return kind_; }
@@ -251,6 +254,12 @@ class ParserBase : public Traits {
// Properties count estimation.
int expected_property_count_;
+ // Location of most recent 'return' statement (invalid if none).
+ Scanner::Location return_location_;
+
+ // Location of call to the "super" constructor (invalid if none).
+ Scanner::Location super_call_location_;
+
FunctionKind kind_;
  // For generators, this variable may hold the generator object. It
// is used by yield expressions and return statements. It is not necessary
@@ -317,10 +326,9 @@ class ParserBase : public Traits {
DCHECK(scope_type != MODULE_SCOPE || allow_harmony_modules());
DCHECK((scope_type == FUNCTION_SCOPE && IsValidFunctionKind(kind)) ||
kind == kNormalFunction);
- Scope* result =
- new (zone()) Scope(zone(), parent, scope_type, ast_value_factory());
- bool uninitialized_this = IsSubclassConstructor(kind);
- result->Initialize(uninitialized_this);
+ Scope* result = new (zone())
+ Scope(zone(), parent, scope_type, ast_value_factory(), kind);
+ result->Initialize();
return result;
}
@@ -527,19 +535,19 @@ class ParserBase : public Traits {
// Report syntax errors.
void ReportMessage(const char* message, const char* arg = NULL,
- bool is_reference_error = false) {
+ ParseErrorType error_type = kSyntaxError) {
Scanner::Location source_location = scanner()->location();
- Traits::ReportMessageAt(source_location, message, arg, is_reference_error);
+ Traits::ReportMessageAt(source_location, message, arg, error_type);
}
void ReportMessageAt(Scanner::Location location, const char* message,
- bool is_reference_error = false) {
- Traits::ReportMessageAt(location, message,
- reinterpret_cast<const char*>(0),
- is_reference_error);
+ ParseErrorType error_type = kSyntaxError) {
+ Traits::ReportMessageAt(location, message, reinterpret_cast<const char*>(0),
+ error_type);
}
void ReportUnexpectedToken(Token::Value token);
+ void ReportUnexpectedTokenAt(Scanner::Location location, Token::Value token);
// Recursive descent functions:
@@ -821,11 +829,6 @@ class PreParserExpression {
ExpressionTypeField::encode(kThisExpression));
}
- static PreParserExpression Super() {
- return PreParserExpression(TypeField::encode(kExpression) |
- ExpressionTypeField::encode(kSuperExpression));
- }
-
static PreParserExpression ThisProperty() {
return PreParserExpression(
TypeField::encode(kExpression) |
@@ -958,7 +961,6 @@ class PreParserExpression {
kThisPropertyExpression,
kPropertyExpression,
kCallExpression,
- kSuperExpression,
kNoTemplateTagExpression
};
@@ -1345,15 +1347,12 @@ class PreParserTraits {
}
// Reporting errors.
- void ReportMessageAt(Scanner::Location location,
- const char* message,
+ void ReportMessageAt(Scanner::Location location, const char* message,
const char* arg = NULL,
- bool is_reference_error = false);
- void ReportMessageAt(int start_pos,
- int end_pos,
- const char* message,
+ ParseErrorType error_type = kSyntaxError);
+ void ReportMessageAt(int start_pos, int end_pos, const char* message,
const char* arg = NULL,
- bool is_reference_error = false);
+ ParseErrorType error_type = kSyntaxError);
// "null" return type creators.
static PreParserIdentifier EmptyIdentifier() {
@@ -1403,7 +1402,7 @@ class PreParserTraits {
static PreParserExpression SuperReference(Scope* scope,
PreParserFactory* factory) {
- return PreParserExpression::Super();
+ return PreParserExpression::Default();
}
static PreParserExpression DefaultConstructor(bool call_super, Scope* scope,
@@ -1418,8 +1417,8 @@ class PreParserTraits {
}
static PreParserExpression ExpressionFromIdentifier(
- PreParserIdentifier name, int pos, Scope* scope,
- PreParserFactory* factory) {
+ PreParserIdentifier name, int start_position, int end_position,
+ Scope* scope, PreParserFactory* factory) {
return PreParserExpression::FromIdentifier(name);
}
@@ -1668,9 +1667,11 @@ template <class Traits>
ParserBase<Traits>::FunctionState::FunctionState(
FunctionState** function_state_stack, Scope** scope_stack, Scope* scope,
FunctionKind kind, typename Traits::Type::Factory* factory)
- : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+ : next_materialized_literal_index_(0),
next_handler_index_(0),
expected_property_count_(0),
+ return_location_(Scanner::Location::invalid()),
+ super_call_location_(Scanner::Location::invalid()),
kind_(kind),
generator_object_variable_(NULL),
function_state_stack_(function_state_stack),
@@ -1692,12 +1693,19 @@ ParserBase<Traits>::FunctionState::~FunctionState() {
template<class Traits>
void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
- Scanner::Location source_location = scanner()->location();
+ return ReportUnexpectedTokenAt(scanner_->location(), token);
+}
+
+
+template<class Traits>
+void ParserBase<Traits>::ReportUnexpectedTokenAt(
+ Scanner::Location source_location, Token::Value token) {
// Four of the tokens are treated specially
switch (token) {
case Token::EOS:
return ReportMessageAt(source_location, "unexpected_eos");
+ case Token::SMI:
case Token::NUMBER:
return ReportMessageAt(source_location, "unexpected_token_number");
case Token::STRING:
@@ -1869,23 +1877,26 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
// '(' Expression ')'
// TemplateLiteral
- int pos = peek_position();
+ int beg_pos = scanner()->peek_location().beg_pos;
+ int end_pos = scanner()->peek_location().end_pos;
ExpressionT result = this->EmptyExpression();
Token::Value token = peek();
switch (token) {
case Token::THIS: {
Consume(Token::THIS);
scope_->RecordThisUsage();
- result = this->ThisExpression(scope_, factory(), pos);
+ result = this->ThisExpression(scope_, factory(), beg_pos);
break;
}
case Token::NULL_LITERAL:
case Token::TRUE_LITERAL:
case Token::FALSE_LITERAL:
+ case Token::SMI:
case Token::NUMBER:
Next();
- result = this->ExpressionFromLiteral(token, pos, scanner(), factory());
+ result =
+ this->ExpressionFromLiteral(token, beg_pos, scanner(), factory());
break;
case Token::IDENTIFIER:
@@ -1895,13 +1906,14 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
case Token::FUTURE_STRICT_RESERVED_WORD: {
// Using eval or arguments in this context is OK even in strict mode.
IdentifierT name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
- result = this->ExpressionFromIdentifier(name, pos, scope_, factory());
+ result = this->ExpressionFromIdentifier(name, beg_pos, end_pos, scope_,
+ factory());
break;
}
case Token::STRING: {
Consume(Token::STRING);
- result = this->ExpressionFromString(pos, scanner(), factory());
+ result = this->ExpressionFromString(beg_pos, scanner(), factory());
break;
}
@@ -1928,7 +1940,7 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
// for which an empty parameter list "()" is valid input.
Consume(Token::RPAREN);
result = this->ParseArrowFunctionLiteral(
- pos, this->EmptyArrowParamList(), CHECK_OK);
+ beg_pos, this->EmptyArrowParamList(), CHECK_OK);
} else {
// Heuristically try to detect immediately called functions before
// seeing the call parentheses.
@@ -1963,8 +1975,8 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
case Token::TEMPLATE_SPAN:
case Token::TEMPLATE_TAIL:
- result =
- this->ParseTemplateLiteral(Traits::NoTemplateTag(), pos, CHECK_OK);
+ result = this->ParseTemplateLiteral(Traits::NoTemplateTag(), beg_pos,
+ CHECK_OK);
break;
case Token::MOD:
@@ -2017,6 +2029,11 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseArrayLiteral(
while (peek() != Token::RBRACK) {
ExpressionT elem = this->EmptyExpression();
if (peek() == Token::COMMA) {
+ if (is_strong(language_mode())) {
+ ReportMessageAt(scanner()->peek_location(), "strong_ellision");
+ *ok = false;
+ return this->EmptyExpression();
+ }
elem = this->GetLiteralTheHole(peek_position(), factory());
} else {
elem = this->ParseAssignmentExpression(true, CHECK_OK);
@@ -2057,6 +2074,11 @@ typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParsePropertyName(
*name = this->GetSymbol(scanner());
break;
+ case Token::SMI:
+ Consume(Token::SMI);
+ *name = this->GetNumberAsSymbol(scanner());
+ break;
+
case Token::NUMBER:
Consume(Token::NUMBER);
*name = this->GetNumberAsSymbol(scanner());
@@ -2105,7 +2127,8 @@ ParserBase<Traits>::ParsePropertyDefinition(ObjectLiteralCheckerBase* checker,
bool is_generator = allow_harmony_object_literals_ && Check(Token::MUL);
Token::Value name_token = peek();
- int next_pos = peek_position();
+ int next_beg_pos = scanner()->peek_location().beg_pos;
+ int next_end_pos = scanner()->peek_location().end_pos;
ExpressionT name_expression = ParsePropertyName(
&name, &is_get, &is_set, &name_is_static, is_computed_name,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
@@ -2143,6 +2166,8 @@ ParserBase<Traits>::ParsePropertyDefinition(ObjectLiteralCheckerBase* checker,
: FunctionKind::kBaseConstructor;
}
+ if (!in_class) kind = WithObjectLiteralBit(kind);
+
value = this->ParseFunctionLiteral(
name, scanner()->location(),
false, // reserved words are allowed here
@@ -2174,11 +2199,12 @@ ParserBase<Traits>::ParsePropertyDefinition(ObjectLiteralCheckerBase* checker,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
}
+ FunctionKind kind = FunctionKind::kAccessorFunction;
+ if (!in_class) kind = WithObjectLiteralBit(kind);
typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
name, scanner()->location(),
false, // reserved words are allowed here
- FunctionKind::kAccessorFunction, RelocInfo::kNoPosition,
- FunctionLiteral::ANONYMOUS_EXPRESSION,
+ kind, RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
is_get ? FunctionLiteral::GETTER_ARITY : FunctionLiteral::SETTER_ARITY,
CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
@@ -2200,7 +2226,8 @@ ParserBase<Traits>::ParsePropertyDefinition(ObjectLiteralCheckerBase* checker,
this->is_generator())) {
DCHECK(!*is_computed_name);
DCHECK(!is_static);
- value = this->ExpressionFromIdentifier(name, next_pos, scope_, factory());
+ value = this->ExpressionFromIdentifier(name, next_beg_pos, next_end_pos,
+ scope_, factory());
return factory()->NewObjectLiteralProperty(
name_expression, value, ObjectLiteralProperty::COMPUTED, false, false);
@@ -2301,13 +2328,17 @@ typename Traits::Type::ExpressionList ParserBase<Traits>::ParseArguments(
*ok = false;
return this->NullExpressionList();
}
- done = (peek() == Token::RPAREN);
+ done = (peek() != Token::COMMA);
if (!done) {
- // Need {} because of the CHECK_OK_CUSTOM macro.
- Expect(Token::COMMA, CHECK_OK_CUSTOM(NullExpressionList));
+ Next();
}
}
- Expect(Token::RPAREN, CHECK_OK_CUSTOM(NullExpressionList));
+ Scanner::Location location = scanner_->location();
+ if (Token::RPAREN != Next()) {
+ ReportMessageAt(location, "unterminated_arg_list");
+ *ok = false;
+ return this->NullExpressionList();
+ }
return result;
}
@@ -2637,23 +2668,6 @@ ParserBase<Traits>::ParseLeftHandSideExpression(bool* ok) {
break;
}
- case Token::TEMPLATE_SPAN:
- case Token::TEMPLATE_TAIL: {
- int pos;
- if (scanner()->current_token() == Token::IDENTIFIER) {
- pos = position();
- } else {
- pos = peek_position();
- if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
- // If the tag function looks like an IIFE, set_parenthesized() to
- // force eager compilation.
- result->AsFunctionLiteral()->set_parenthesized();
- }
- }
- result = ParseTemplateLiteral(result, pos, CHECK_OK);
- break;
- }
-
case Token::PERIOD: {
Consume(Token::PERIOD);
int pos = position();
@@ -2724,7 +2738,7 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseMemberExpression(bool* ok) {
// MemberExpression ::
// (PrimaryExpression | FunctionLiteral | ClassLiteral)
- // ('[' Expression ']' | '.' Identifier | Arguments)*
+ // ('[' Expression ']' | '.' Identifier | Arguments | TemplateLiteral)*
// The '[' Expression ']' and '.' Identifier parts are parsed by
// ParseMemberExpressionContinuation, and the Arguments part is parsed by the
@@ -2786,6 +2800,19 @@ ParserBase<Traits>::ParseSuperExpression(bool is_new, bool* ok) {
// new super() is never allowed.
// super() is only allowed in derived constructor
if (!is_new && peek() == Token::LPAREN && IsSubclassConstructor(kind)) {
+ if (is_strong(language_mode())) {
+ if (function_state->super_call_location().IsValid()) {
+ ReportMessageAt(scanner()->location(), "strong_super_call_duplicate");
+ *ok = false;
+ return this->EmptyExpression();
+ } else if (function_state->return_location().IsValid()) {
+ ReportMessageAt(function_state->return_location(),
+ "strong_constructor_return_misplaced");
+ *ok = false;
+ return this->EmptyExpression();
+ }
+ }
+ function_state->set_super_call_location(scanner()->location());
return this->SuperReference(scope_, factory());
}
}
@@ -2801,7 +2828,7 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseMemberExpressionContinuation(ExpressionT expression,
bool* ok) {
// Parses this part of MemberExpression:
- // ('[' Expression ']' | '.' Identifier)*
+ // ('[' Expression ']' | '.' Identifier | TemplateLiteral)*
while (true) {
switch (peek()) {
case Token::LBRACK: {
@@ -2826,6 +2853,22 @@ ParserBase<Traits>::ParseMemberExpressionContinuation(ExpressionT expression,
}
break;
}
+ case Token::TEMPLATE_SPAN:
+ case Token::TEMPLATE_TAIL: {
+ int pos;
+ if (scanner()->current_token() == Token::IDENTIFIER) {
+ pos = position();
+ } else {
+ pos = peek_position();
+ if (expression->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+ // If the tag function looks like an IIFE, set_parenthesized() to
+ // force eager compilation.
+ expression->AsFunctionLiteral()->set_parenthesized();
+ }
+ }
+ expression = ParseTemplateLiteral(expression, pos, CHECK_OK);
+ break;
+ }
default:
return expression;
}
@@ -2840,12 +2883,22 @@ typename ParserBase<Traits>::ExpressionT
ParserBase<Traits>::ParseArrowFunctionLiteral(int start_pos,
ExpressionT params_ast,
bool* ok) {
+ if (peek() == Token::ARROW && scanner_->HasAnyLineTerminatorBeforeNext()) {
+ // ASI inserts `;` after arrow parameters if a line terminator is found.
+ // `=> ...` is never a valid expression, so report as syntax error.
+    // If next token is not `=>`, it's a syntax error anyway.
+ ReportUnexpectedTokenAt(scanner_->peek_location(), Token::ARROW);
+ *ok = false;
+ return this->EmptyExpression();
+ }
+
Scope* scope = this->NewScope(scope_, ARROW_SCOPE);
typename Traits::Type::StatementList body;
int num_parameters = -1;
int materialized_literal_count = -1;
int expected_property_count = -1;
int handler_count = 0;
+ Scanner::Location super_loc;
{
typename Traits::Type::Factory function_factory(ast_value_factory());
@@ -2901,6 +2954,7 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(int start_pos,
expected_property_count = function_state.expected_property_count();
handler_count = function_state.handler_count();
}
+ super_loc = function_state.super_call_location();
scope->set_start_position(start_pos);
scope->set_end_position(scanner()->location().end_pos);
@@ -2918,10 +2972,8 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(int start_pos,
if (is_strict(language_mode())) {
CheckStrictOctalLiteral(start_pos, scanner()->location().end_pos,
CHECK_OK);
- }
-
- if (allow_harmony_scoping() && is_strict(language_mode()))
this->CheckConflictingVarDeclarations(scope, CHECK_OK);
+ }
}
FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
@@ -2933,6 +2985,7 @@ ParserBase<Traits>::ParseArrowFunctionLiteral(int start_pos,
start_pos);
function_literal->set_function_token_position(start_pos);
+ if (super_loc.IsValid()) function_state_->set_super_call_location(super_loc);
if (fni_ != NULL) this->InferFunctionName(fni_, function_literal);
@@ -2988,7 +3041,7 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start, bool* ok) {
} else if (next == Token::ILLEGAL) {
Traits::ReportMessageAt(
Scanner::Location(position() + 1, peek_position()),
- "unexpected_token", "ILLEGAL", false);
+ "unexpected_token", "ILLEGAL", kSyntaxError);
*ok = false;
return Traits::EmptyExpression();
}
@@ -3017,7 +3070,7 @@ ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start, bool* ok) {
} else if (next == Token::ILLEGAL) {
Traits::ReportMessageAt(
Scanner::Location(position() + 1, peek_position()),
- "unexpected_token", "ILLEGAL", false);
+ "unexpected_token", "ILLEGAL", kSyntaxError);
*ok = false;
return Traits::EmptyExpression();
}
@@ -3039,7 +3092,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
const char* message, bool* ok) {
if (is_strict(language_mode()) && this->IsIdentifier(expression) &&
this->IsEvalOrArguments(this->AsIdentifier(expression))) {
- this->ReportMessageAt(location, "strict_eval_arguments", false);
+ this->ReportMessageAt(location, "strict_eval_arguments", kSyntaxError);
*ok = false;
return this->EmptyExpression();
} else if (expression->IsValidReferenceExpression()) {
@@ -3051,7 +3104,7 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
ExpressionT error = this->NewThrowReferenceError(message, pos);
return factory()->NewProperty(expression, error, pos);
} else {
- this->ReportMessageAt(location, message, true);
+ this->ReportMessageAt(location, message, kReferenceError);
*ok = false;
return this->EmptyExpression();
}
@@ -3069,7 +3122,7 @@ void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
DCHECK(!is_static);
DCHECK(!is_generator || type == kMethodProperty);
- if (property == Token::NUMBER) return;
+ if (property == Token::SMI || property == Token::NUMBER) return;
if (type == kValueProperty && IsProto()) {
if (has_seen_proto_) {
@@ -3089,7 +3142,7 @@ void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
bool* ok) {
DCHECK(type == kMethodProperty || type == kAccessorProperty);
- if (property == Token::NUMBER) return;
+ if (property == Token::SMI || property == Token::NUMBER) return;
if (is_static) {
if (IsPrototype()) {
@@ -3099,7 +3152,9 @@ void ParserBase<Traits>::ClassLiteralChecker::CheckProperty(
}
} else if (IsConstructor()) {
if (is_generator || type == kAccessorProperty) {
- this->parser()->ReportMessage("constructor_special_method");
+ const char* msg =
+ is_generator ? "constructor_is_generator" : "constructor_is_accessor";
+ this->parser()->ReportMessage(msg);
*ok = false;
return;
}
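FunctionState's new return_location_ and super_call_location_ fields start out as Scanner::Location::invalid(), so one field both answers "has this been seen?" and remembers where to point the diagnostic. A self-contained sketch of the pattern, with the Location shape assumed from its IsValid()/invalid() usage in this patch:

#include <cassert>

// A location doubling as an occurrence flag: invalid means "not seen yet".
struct Location {
  int beg_pos;
  int end_pos;
  bool IsValid() const { return beg_pos >= 0 && end_pos >= beg_pos; }
  static Location Invalid() { return Location{-1, 0}; }
};

int main() {
  Location super_call = Location::Invalid();
  assert(!super_call.IsValid());  // no super() recorded yet
  super_call = Location{10, 17};  // record the call site when parsed
  assert(super_call.IsValid());   // later checks can report at 10..17
  return 0;
}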
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index da43d0eb0f..165e717212 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -106,7 +106,6 @@ void CallPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
void CallPrinter::VisitImportDeclaration(ImportDeclaration* node) {
- Find(node->module());
}
@@ -481,7 +480,7 @@ void PrettyPrinter::VisitImportDeclaration(ImportDeclaration* node) {
Print("import ");
PrintLiteral(node->proxy()->name(), false);
Print(" from ");
- Visit(node->module());
+ PrintLiteral(node->module_specifier()->string(), true);
Print(";");
}
@@ -1213,7 +1212,7 @@ void AstPrinter::VisitModuleDeclaration(ModuleDeclaration* node) {
void AstPrinter::VisitImportDeclaration(ImportDeclaration* node) {
IndentedScope indent(this, "IMPORT");
PrintLiteralIndented("NAME", node->proxy()->name(), true);
- Visit(node->module());
+ PrintLiteralIndented("FROM", node->module_specifier()->string(), true);
}
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index d9f55dd44b..1e543dda8b 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -21,12 +21,12 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag, const char* name,
resource_name_(resource_name),
line_number_(line_number),
column_number_(column_number),
- shared_id_(0),
script_id_(v8::UnboundScript::kNoScriptId),
+ position_(0),
no_frame_ranges_(NULL),
bailout_reason_(kEmptyBailoutReason),
deopt_reason_(kNoDeoptReason),
- deopt_location_(0),
+ deopt_position_(SourcePosition::Unknown()),
line_info_(line_info),
instruction_start_(instruction_start) {}
@@ -48,6 +48,11 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry)
children_(CodeEntriesMatch),
id_(tree->next_node_id()),
line_ticks_(LineTickMatch) {}
+
+
+inline unsigned ProfileNode::function_id() const {
+ return tree_->GetFunctionId(this);
+}
} } // namespace v8::internal
#endif // V8_PROFILE_GENERATOR_INL_H_
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 292e3325d1..385e753025 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -18,120 +18,6 @@ namespace v8 {
namespace internal {
-bool StringsStorage::StringsMatch(void* key1, void* key2) {
- return strcmp(reinterpret_cast<char*>(key1),
- reinterpret_cast<char*>(key2)) == 0;
-}
-
-
-StringsStorage::StringsStorage(Heap* heap)
- : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
-}
-
-
-StringsStorage::~StringsStorage() {
- for (HashMap::Entry* p = names_.Start();
- p != NULL;
- p = names_.Next(p)) {
- DeleteArray(reinterpret_cast<const char*>(p->value));
- }
-}
-
-
-const char* StringsStorage::GetCopy(const char* src) {
- int len = static_cast<int>(strlen(src));
- HashMap::Entry* entry = GetEntry(src, len);
- if (entry->value == NULL) {
- Vector<char> dst = Vector<char>::New(len + 1);
- StrNCpy(dst, src, len);
- dst[len] = '\0';
- entry->key = dst.start();
- entry->value = entry->key;
- }
- return reinterpret_cast<const char*>(entry->value);
-}
-
-
-const char* StringsStorage::GetFormatted(const char* format, ...) {
- va_list args;
- va_start(args, format);
- const char* result = GetVFormatted(format, args);
- va_end(args);
- return result;
-}
-
-
-const char* StringsStorage::AddOrDisposeString(char* str, int len) {
- HashMap::Entry* entry = GetEntry(str, len);
- if (entry->value == NULL) {
- // New entry added.
- entry->key = str;
- entry->value = str;
- } else {
- DeleteArray(str);
- }
- return reinterpret_cast<const char*>(entry->value);
-}
-
-
-const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
- Vector<char> str = Vector<char>::New(1024);
- int len = VSNPrintF(str, format, args);
- if (len == -1) {
- DeleteArray(str.start());
- return GetCopy(format);
- }
- return AddOrDisposeString(str.start(), len);
-}
-
-
-const char* StringsStorage::GetName(Name* name) {
- if (name->IsString()) {
- String* str = String::cast(name);
- int length = Min(kMaxNameSize, str->length());
- int actual_length = 0;
- SmartArrayPointer<char> data =
- str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
- &actual_length);
- return AddOrDisposeString(data.Detach(), actual_length);
- } else if (name->IsSymbol()) {
- return "<symbol>";
- }
- return "";
-}
-
-
-const char* StringsStorage::GetName(int index) {
- return GetFormatted("%d", index);
-}
-
-
-const char* StringsStorage::GetFunctionName(Name* name) {
- return GetName(name);
-}
-
-
-const char* StringsStorage::GetFunctionName(const char* name) {
- return GetCopy(name);
-}
-
-
-size_t StringsStorage::GetUsedMemorySize() const {
- size_t size = sizeof(*this);
- size += sizeof(HashMap::Entry) * names_.capacity();
- for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
- size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
- }
- return size;
-}
-
-
-HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
- uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
- return names_.Lookup(const_cast<char*>(str), hash, true);
-}
-
-
JITLineInfoTable::JITLineInfoTable() {}
@@ -169,10 +55,12 @@ CodeEntry::~CodeEntry() {
}
-uint32_t CodeEntry::GetCallUid() const {
+uint32_t CodeEntry::GetHash() const {
uint32_t hash = ComputeIntegerHash(tag(), v8::internal::kZeroHashSeed);
- if (shared_id_ != 0) {
- hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
+ if (script_id_ != v8::UnboundScript::kNoScriptId) {
+ hash ^= ComputeIntegerHash(static_cast<uint32_t>(script_id_),
+ v8::internal::kZeroHashSeed);
+ hash ^= ComputeIntegerHash(static_cast<uint32_t>(position_),
v8::internal::kZeroHashSeed);
} else {
hash ^= ComputeIntegerHash(
@@ -190,13 +78,14 @@ uint32_t CodeEntry::GetCallUid() const {
}
-bool CodeEntry::IsSameAs(CodeEntry* entry) const {
- return this == entry ||
- (tag() == entry->tag() && shared_id_ == entry->shared_id_ &&
- (shared_id_ != 0 ||
- (name_prefix_ == entry->name_prefix_ && name_ == entry->name_ &&
- resource_name_ == entry->resource_name_ &&
- line_number_ == entry->line_number_)));
+bool CodeEntry::IsSameFunctionAs(CodeEntry* entry) const {
+ if (this == entry) return true;
+ if (script_id_ != v8::UnboundScript::kNoScriptId) {
+ return script_id_ == entry->script_id_ && position_ == entry->position_;
+ }
+ return name_prefix_ == entry->name_prefix_ && name_ == entry->name_ &&
+ resource_name_ == entry->resource_name_ &&
+ line_number_ == entry->line_number_;
}
@@ -214,8 +103,51 @@ int CodeEntry::GetSourceLine(int pc_offset) const {
}
+void CodeEntry::FillFunctionInfo(SharedFunctionInfo* shared) {
+ if (!shared->script()->IsScript()) return;
+ Script* script = Script::cast(shared->script());
+ set_script_id(script->id()->value());
+ set_position(shared->start_position());
+ set_bailout_reason(GetBailoutReason(shared->disable_optimization_reason()));
+}
+
+
+DeoptInfo CodeEntry::GetDeoptInfo() {
+ DCHECK(has_deopt_info());
+
+ DeoptInfo info;
+ info.deopt_reason = deopt_reason_;
+ if (inlined_function_infos_.empty()) {
+ info.stack.push_back(DeoptInfo::Frame(
+ {script_id_,
+ static_cast<int>(position_ + deopt_position_.position())}));
+ return info;
+ }
+ // Copy the only branch from the inlining tree where the deopt happened.
+ SourcePosition position = deopt_position_;
+ int inlining_id = InlinedFunctionInfo::kNoParentId;
+ for (size_t i = 0; i < inlined_function_infos_.size(); ++i) {
+ InlinedFunctionInfo& current_info = inlined_function_infos_.at(i);
+ if (std::binary_search(current_info.deopt_pc_offsets.begin(),
+ current_info.deopt_pc_offsets.end(), pc_offset_)) {
+ inlining_id = static_cast<int>(i);
+ break;
+ }
+ }
+ while (inlining_id != InlinedFunctionInfo::kNoParentId) {
+ InlinedFunctionInfo& inlined_info = inlined_function_infos_.at(inlining_id);
+ info.stack.push_back(DeoptInfo::Frame(
+ {inlined_info.script_id,
+ static_cast<int>(inlined_info.start_position + position.raw())}));
+ position = inlined_info.inline_position;
+ inlining_id = inlined_info.parent_id;
+ }
+ return info;
+}
+
+
void ProfileNode::CollectDeoptInfo(CodeEntry* entry) {
- deopt_infos_.Add(DeoptInfo(entry->deopt_reason(), entry->deopt_location()));
+ deopt_infos_.push_back(entry->GetDeoptInfo());
entry->clear_deopt_info();
}
@@ -283,9 +215,17 @@ void ProfileNode::Print(int indent) {
if (entry_->resource_name()[0] != '\0')
base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
base::OS::Print("\n");
- for (auto info : deopt_infos_) {
- base::OS::Print("%*s deopted at %d with reason '%s'\n", indent + 10, "",
- info.deopt_location, info.deopt_reason);
+ for (size_t i = 0; i < deopt_infos_.size(); ++i) {
+ DeoptInfo& info = deopt_infos_[i];
+ base::OS::Print(
+ "%*s;;; deopted at script_id: %d position: %d with reason '%s'.\n",
+ indent + 10, "", info.stack[0].script_id, info.stack[0].position,
+ info.deopt_reason);
+ for (size_t index = 1; index < info.stack.size(); ++index) {
+ base::OS::Print("%*s;;; Inline point: script_id %d position: %d.\n",
+ indent + 10, "", info.stack[index].script_id,
+ info.stack[index].position);
+ }
}
const char* bailout_reason = entry_->bailout_reason();
if (bailout_reason != GetBailoutReason(BailoutReason::kNoReason) &&
@@ -316,8 +256,9 @@ class DeleteNodesCallback {
ProfileTree::ProfileTree()
: root_entry_(Logger::FUNCTION_TAG, "(root)"),
next_node_id_(1),
- root_(new ProfileNode(this, &root_entry_)) {
-}
+ root_(new ProfileNode(this, &root_entry_)),
+ next_function_id_(1),
+ function_ids_(ProfileNode::CodeEntriesMatch) {}
ProfileTree::~ProfileTree() {
@@ -326,6 +267,17 @@ ProfileTree::~ProfileTree() {
}
+unsigned ProfileTree::GetFunctionId(const ProfileNode* node) {
+ CodeEntry* code_entry = node->entry();
+ HashMap::Entry* entry =
+ function_ids_.Lookup(code_entry, code_entry->GetHash(), true);
+ if (!entry->value) {
+ entry->value = reinterpret_cast<void*>(next_function_id_++);
+ }
+ return static_cast<unsigned>(reinterpret_cast<uintptr_t>(entry->value));
+}
+
+
ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path,
int src_line) {
ProfileNode* node = root_;
@@ -427,7 +379,6 @@ void CpuProfile::Print() {
}
-CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
@@ -469,22 +420,6 @@ CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
}
-int CodeMap::GetSharedId(Address addr) {
- CodeTree::Locator locator;
- // For shared function entries, 'size' field is used to store their IDs.
- if (tree_.Find(addr, &locator)) {
- const CodeEntryInfo& entry = locator.value();
- DCHECK(entry.entry == kSharedFunctionCodeEntry);
- return entry.size;
- } else {
- tree_.Insert(addr, &locator);
- int id = next_shared_id_++;
- locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
- return id;
- }
-}
-
-
void CodeMap::MoveCode(Address from, Address to) {
if (from == to) return;
CodeTree::Locator locator;
@@ -497,12 +432,7 @@ void CodeMap::MoveCode(Address from, Address to) {
void CodeMap::CodeTreePrinter::Call(
const Address& key, const CodeMap::CodeEntryInfo& value) {
- // For shared function entries, 'size' field is used to store their IDs.
- if (value.entry == kSharedFunctionCodeEntry) {
- base::OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
- } else {
- base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
- }
+ base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
}
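ProfileTree::GetFunctionId lazily interns entries into dense integer ids, stashing each id in the HashMap's void* value slot and matching entries through the new GetHash()/IsSameFunctionAs() pair. A sketch of the same interning pattern with standard containers, keyed by raw pointer purely to stay self-contained:

#include <unordered_map>

// Dense-id interning: the first lookup for a key allocates the next id,
// subsequent lookups return the same one. V8 keys this by function
// identity (GetHash/IsSameFunctionAs) rather than by pointer.
class FunctionIdMap {
 public:
  unsigned GetId(const void* code_entry) {
    auto it = ids_.find(code_entry);
    if (it == ids_.end()) it = ids_.emplace(code_entry, next_id_++).first;
    return it->second;
  }

 private:
  unsigned next_id_ = 1;  // ids start at 1; 0 mirrors the empty void* slot
  std::unordered_map<const void*, unsigned> ids_;
};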
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index f7176a053a..e013e29c5d 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -8,43 +8,15 @@
#include <map>
#include "include/v8-profiler.h"
#include "src/allocation.h"
+#include "src/compiler.h"
#include "src/hashmap.h"
+#include "src/strings-storage.h"
namespace v8 {
namespace internal {
struct OffsetRange;
-// Provides a storage of strings allocated in C++ heap, to hold them
-// forever, even if they disappear from JS heap or external storage.
-class StringsStorage {
- public:
- explicit StringsStorage(Heap* heap);
- ~StringsStorage();
-
- const char* GetCopy(const char* src);
- const char* GetFormatted(const char* format, ...);
- const char* GetVFormatted(const char* format, va_list args);
- const char* GetName(Name* name);
- const char* GetName(int index);
- const char* GetFunctionName(Name* name);
- const char* GetFunctionName(const char* name);
- size_t GetUsedMemorySize() const;
-
- private:
- static const int kMaxNameSize = 1024;
-
- static bool StringsMatch(void* key1, void* key2);
- const char* AddOrDisposeString(char* str, int len);
- HashMap::Entry* GetEntry(const char* str, int len);
-
- uint32_t hash_seed_;
- HashMap names_;
-
- DISALLOW_COPY_AND_ASSIGN(StringsStorage);
-};
-
-
// Provides a mapping from the offsets within generated code to
// the source line.
class JITLineInfoTable : public Malloced {
@@ -64,6 +36,17 @@ class JITLineInfoTable : public Malloced {
DISALLOW_COPY_AND_ASSIGN(JITLineInfoTable);
};
+
+struct DeoptInfo {
+ const char* deopt_reason;
+ struct Frame {
+ int script_id;
+ int position;
+ };
+ std::vector<Frame> stack;
+};
+
+
class CodeEntry {
public:
// CodeEntry doesn't own name strings, just references them.
@@ -84,41 +67,54 @@ class CodeEntry {
int line_number() const { return line_number_; }
int column_number() const { return column_number_; }
const JITLineInfoTable* line_info() const { return line_info_; }
- void set_shared_id(int shared_id) { shared_id_ = shared_id; }
int script_id() const { return script_id_; }
void set_script_id(int script_id) { script_id_ = script_id; }
+ int position() const { return position_; }
+ void set_position(int position) { position_ = position; }
void set_bailout_reason(const char* bailout_reason) {
bailout_reason_ = bailout_reason;
}
const char* bailout_reason() const { return bailout_reason_; }
- void set_deopt_info(const char* deopt_reason, int location) {
- DCHECK(!deopt_location_);
+ void set_deopt_info(const char* deopt_reason, SourcePosition position,
+ size_t pc_offset) {
+ DCHECK(deopt_position_.IsUnknown());
deopt_reason_ = deopt_reason;
- deopt_location_ = location;
+ deopt_position_ = position;
+ pc_offset_ = pc_offset;
}
+ DeoptInfo GetDeoptInfo();
const char* deopt_reason() const { return deopt_reason_; }
- int deopt_location() const { return deopt_location_; }
- bool has_deopt_info() const { return deopt_location_; }
+ SourcePosition deopt_position() const { return deopt_position_; }
+ bool has_deopt_info() const { return !deopt_position_.IsUnknown(); }
void clear_deopt_info() {
deopt_reason_ = kNoDeoptReason;
- deopt_location_ = 0;
+ deopt_position_ = SourcePosition::Unknown();
}
+ void FillFunctionInfo(SharedFunctionInfo* shared);
+
static inline bool is_js_function_tag(Logger::LogEventsAndTags tag);
List<OffsetRange>* no_frame_ranges() const { return no_frame_ranges_; }
void set_no_frame_ranges(List<OffsetRange>* ranges) {
no_frame_ranges_ = ranges;
}
+ void set_inlined_function_infos(
+ const std::vector<InlinedFunctionInfo>& infos) {
+ inlined_function_infos_ = infos;
+ }
+ const std::vector<InlinedFunctionInfo> inlined_function_infos() {
+ return inlined_function_infos_;
+ }
void SetBuiltinId(Builtins::Name id);
Builtins::Name builtin_id() const {
return BuiltinIdField::decode(bit_field_);
}
- uint32_t GetCallUid() const;
- bool IsSameAs(CodeEntry* entry) const;
+ uint32_t GetHash() const;
+ bool IsSameFunctionAs(CodeEntry* entry) const;
int GetSourceLine(int pc_offset) const;
@@ -140,15 +136,18 @@ class CodeEntry {
const char* resource_name_;
int line_number_;
int column_number_;
- int shared_id_;
int script_id_;
+ int position_;
List<OffsetRange>* no_frame_ranges_;
const char* bailout_reason_;
const char* deopt_reason_;
- int deopt_location_;
+ SourcePosition deopt_position_;
+ size_t pc_offset_;
JITLineInfoTable* line_info_;
Address instruction_start_;
+ std::vector<InlinedFunctionInfo> inlined_function_infos_;
+
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
};
@@ -156,17 +155,6 @@ class CodeEntry {
class ProfileTree;
class ProfileNode {
- private:
- struct DeoptInfo {
- DeoptInfo(const char* deopt_reason, int deopt_location)
- : deopt_reason(deopt_reason), deopt_location(deopt_location) {}
- DeoptInfo(const DeoptInfo& info)
- : deopt_reason(info.deopt_reason),
- deopt_location(info.deopt_location) {}
- const char* deopt_reason;
- int deopt_location;
- };
-
public:
inline ProfileNode(ProfileTree* tree, CodeEntry* entry);
@@ -180,23 +168,22 @@ class ProfileNode {
unsigned self_ticks() const { return self_ticks_; }
const List<ProfileNode*>* children() const { return &children_list_; }
unsigned id() const { return id_; }
+ unsigned function_id() const;
unsigned int GetHitLineCount() const { return line_ticks_.occupancy(); }
bool GetLineTicks(v8::CpuProfileNode::LineTick* entries,
unsigned int length) const;
void CollectDeoptInfo(CodeEntry* entry);
- const List<DeoptInfo>& deopt_infos() const { return deopt_infos_; }
+ const std::vector<DeoptInfo>& deopt_infos() const { return deopt_infos_; }
void Print(int indent);
- private:
static bool CodeEntriesMatch(void* entry1, void* entry2) {
- return reinterpret_cast<CodeEntry*>(entry1)->IsSameAs(
- reinterpret_cast<CodeEntry*>(entry2));
+ return reinterpret_cast<CodeEntry*>(entry1)
+ ->IsSameFunctionAs(reinterpret_cast<CodeEntry*>(entry2));
}
- static uint32_t CodeEntryHash(CodeEntry* entry) {
- return entry->GetCallUid();
- }
+ private:
+ static uint32_t CodeEntryHash(CodeEntry* entry) { return entry->GetHash(); }
static bool LineTickMatch(void* a, void* b) { return a == b; }
@@ -209,7 +196,8 @@ class ProfileNode {
unsigned id_;
HashMap line_ticks_;
- List<DeoptInfo> deopt_infos_;
+ std::vector<DeoptInfo> deopt_infos_;
+
DISALLOW_COPY_AND_ASSIGN(ProfileNode);
};
@@ -224,6 +212,7 @@ class ProfileTree {
int src_line = v8::CpuProfileNode::kNoLineNumberInfo);
ProfileNode* root() const { return root_; }
unsigned next_node_id() { return next_node_id_++; }
+ unsigned GetFunctionId(const ProfileNode* node);
void Print() {
root_->Print(0);
@@ -237,6 +226,9 @@ class ProfileTree {
unsigned next_node_id_;
ProfileNode* root_;
+ unsigned next_function_id_;
+ HashMap function_ids_;
+
DISALLOW_COPY_AND_ASSIGN(ProfileTree);
};
@@ -281,7 +273,7 @@ class CpuProfile {
class CodeMap {
public:
- CodeMap() : next_shared_id_(1) { }
+ CodeMap() {}
void AddCode(Address addr, CodeEntry* entry, unsigned size);
void MoveCode(Address from, Address to);
CodeEntry* FindEntry(Address addr, Address* start = NULL);
@@ -315,11 +307,7 @@ class CodeMap {
void DeleteAllCoveredCode(Address start, Address end);
- // Fake CodeEntry pointer to distinguish shared function entries.
- static CodeEntry* const kSharedFunctionCodeEntry;
-
CodeTree tree_;
- int next_shared_id_;
DISALLOW_COPY_AND_ASSIGN(CodeMap);
};
diff --git a/deps/v8/src/promise.js b/deps/v8/src/promise.js
index c096296b0e..c7bd204bb0 100644
--- a/deps/v8/src/promise.js
+++ b/deps/v8/src/promise.js
@@ -44,7 +44,7 @@ var lastMicrotaskId = 0;
throw MakeTypeError('resolver_not_a_function', [resolver]);
var promise = PromiseInit(this);
try {
- %DebugPushPromise(promise);
+ %DebugPushPromise(promise, Promise);
resolver(function(x) { PromiseResolve(promise, x) },
function(r) { PromiseReject(promise, r) });
} catch (e) {
@@ -110,7 +110,7 @@ var lastMicrotaskId = 0;
function PromiseHandle(value, handler, deferred) {
try {
- %DebugPushPromise(deferred.promise);
+ %DebugPushPromise(deferred.promise, PromiseHandle);
DEBUG_PREPARE_STEP_IN_IF_STEPPING(handler);
var result = handler(value);
if (result === deferred.promise)
@@ -301,51 +301,44 @@ var lastMicrotaskId = 0;
return IsPromise(x) ? x : new this(function(resolve) { resolve(x) });
}
- function PromiseAll(values) {
+ function PromiseAll(iterable) {
var deferred = %_CallFunction(this, PromiseDeferred);
var resolutions = [];
- if (!%_IsArray(values)) {
- deferred.reject(MakeTypeError('invalid_argument'));
- return deferred.promise;
- }
try {
- var count = values.length;
- if (count === 0) {
- deferred.resolve(resolutions);
- } else {
- for (var i = 0; i < values.length; ++i) {
- this.resolve(values[i]).then(
- (function() {
- // Nested scope to get closure over current i (and avoid .bind).
- // TODO(rossberg): Use for-let instead once available.
- var i_captured = i;
+ var count = 0;
+ var i = 0;
+ for (var value of iterable) {
+ this.resolve(value).then(
+ // Nested scope to get closure over current i.
+ // TODO(arv): Use an inner let binding once available.
+ (function(i) {
return function(x) {
- resolutions[i_captured] = x;
+ resolutions[i] = x;
if (--count === 0) deferred.resolve(resolutions);
- };
- })(),
- function(r) { deferred.reject(r) }
- );
- }
+ }
+ })(i),
+ function(r) { deferred.reject(r); });
+ ++i;
+ ++count;
+ }
+
+ if (count === 0) {
+ deferred.resolve(resolutions);
}
+
} catch (e) {
deferred.reject(e)
}
return deferred.promise;
}
- function PromiseOne(values) {
+ function PromiseRace(iterable) {
var deferred = %_CallFunction(this, PromiseDeferred);
- if (!%_IsArray(values)) {
- deferred.reject(MakeTypeError('invalid_argument'));
- return deferred.promise;
- }
try {
- for (var i = 0; i < values.length; ++i) {
- this.resolve(values[i]).then(
- function(x) { deferred.resolve(x) },
- function(r) { deferred.reject(r) }
- );
+ for (var value of iterable) {
+ this.resolve(value).then(
+ function(x) { deferred.resolve(x) },
+ function(r) { deferred.reject(r) });
}
} catch (e) {
deferred.reject(e)
@@ -388,7 +381,7 @@ var lastMicrotaskId = 0;
"accept", PromiseResolved,
"reject", PromiseRejected,
"all", PromiseAll,
- "race", PromiseOne,
+ "race", PromiseRace,
"resolve", PromiseCast
]);
InstallFunctions($Promise.prototype, DONT_ENUM, [
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 135a079d26..e71419ac8c 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -187,16 +187,25 @@ static const int kInvalidEnumCacheSentinel =
(1 << kDescriptorIndexBitCount) - 1;
+enum class PropertyCellType {
+ kUninitialized, // Cell is deleted or not yet defined.
+  kUndefined,           // The PREMONOMORPHIC state of property cells.
+ kConstant, // Cell has been assigned only once.
+ kMutable, // Cell will no longer be tracked as constant.
+ kDeleted = kConstant, // like kUninitialized, but for cells already deleted.
+ kInvalid = kMutable, // For dictionaries not holding cells.
+};
+
+
// PropertyDetails captures type and attributes for a property.
// They are used both in property dictionaries and instance descriptors.
class PropertyDetails BASE_EMBEDDED {
public:
- PropertyDetails(PropertyAttributes attributes,
- PropertyType type,
- int index) {
- value_ = TypeField::encode(type)
- | AttributesField::encode(attributes)
- | DictionaryStorageField::encode(index);
+ PropertyDetails(PropertyAttributes attributes, PropertyType type, int index,
+ PropertyCellType cell_type) {
+ value_ = TypeField::encode(type) | AttributesField::encode(attributes) |
+ DictionaryStorageField::encode(index) |
+ PropertyCellTypeField::encode(cell_type);
DCHECK(type == this->type());
DCHECK(attributes == this->attributes());
@@ -221,14 +230,32 @@ class PropertyDetails BASE_EMBEDDED {
FieldIndexField::encode(field_index);
}
+ static PropertyDetails Empty() {
+ return PropertyDetails(NONE, DATA, 0, PropertyCellType::kInvalid);
+ }
+
int pointer() const { return DescriptorPointer::decode(value_); }
- PropertyDetails set_pointer(int i) { return PropertyDetails(value_, i); }
+ PropertyDetails set_pointer(int i) const {
+ return PropertyDetails(value_, i);
+ }
+
+ PropertyDetails set_cell_type(PropertyCellType type) const {
+ PropertyDetails details = *this;
+ details.value_ = PropertyCellTypeField::update(details.value_, type);
+ return details;
+ }
+
+ PropertyDetails set_index(int index) const {
+ PropertyDetails details = *this;
+ details.value_ = DictionaryStorageField::update(details.value_, index);
+ return details;
+ }
PropertyDetails CopyWithRepresentation(Representation representation) const {
return PropertyDetails(value_, representation);
}
- PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) {
+ PropertyDetails CopyAddAttributes(PropertyAttributes new_attributes) const {
new_attributes =
static_cast<PropertyAttributes>(attributes() | new_attributes);
return PropertyDetails(value_, new_attributes);
@@ -267,8 +294,6 @@ class PropertyDetails BASE_EMBEDDED {
inline int field_width_in_words() const;
- inline PropertyDetails AsDeleted() const;
-
static bool IsValidIndex(int index) {
return DictionaryStorageField::is_valid(index);
}
@@ -276,7 +301,9 @@ class PropertyDetails BASE_EMBEDDED {
bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
bool IsConfigurable() const { return (attributes() & DONT_DELETE) == 0; }
bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
- bool IsDeleted() const { return DeletedField::decode(value_) != 0; }
+ PropertyCellType cell_type() const {
+ return PropertyCellTypeField::decode(value_);
+ }
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
@@ -285,8 +312,8 @@ class PropertyDetails BASE_EMBEDDED {
class AttributesField : public BitField<PropertyAttributes, 2, 3> {};
// Bit fields for normalized objects.
- class DeletedField : public BitField<uint32_t, 5, 1> {};
- class DictionaryStorageField : public BitField<uint32_t, 6, 24> {};
+ class PropertyCellTypeField : public BitField<PropertyCellType, 5, 2> {};
+ class DictionaryStorageField : public BitField<uint32_t, 7, 24> {};
// Bit fields for fast objects.
class RepresentationField : public BitField<uint32_t, 5, 4> {};
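
The two-bit PropertyCellTypeField takes over the slot of the old one-bit
DeletedField at shift 5, which is why DictionaryStorageField moves from
shift 6 to shift 7 while keeping its 24-bit width. A rough JavaScript model
of the resulting packing (helper names hypothetical; assumes TypeField
occupies bits 0-1, as the shift-2 AttributesField suggests):

    // type: bits 0-1, attributes: bits 2-4, cell type: bits 5-6,
    // dictionary storage index: bits 7-30.
    function encodeDetails(type, attributes, cellType, storageIndex) {
      return type | (attributes << 2) | (cellType << 5) | (storageIndex << 7);
    }
    function decodeCellType(value) {
      return (value >> 5) & 0x3;
    }
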
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index 5f8e6da407..c6556b3fc1 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -79,10 +79,14 @@ class DataDescriptor FINAL : public Descriptor {
PropertyAttributes attributes, Representation representation)
: Descriptor(key, HeapType::Any(key->GetIsolate()), attributes, DATA,
representation, field_index) {}
- DataDescriptor(Handle<Name> key, int field_index, Handle<HeapType> field_type,
+ // The field type is either a simple type or a map wrapped in a weak cell.
+ DataDescriptor(Handle<Name> key, int field_index,
+ Handle<Object> wrapped_field_type,
PropertyAttributes attributes, Representation representation)
- : Descriptor(key, field_type, attributes, DATA, representation,
- field_index) {}
+ : Descriptor(key, wrapped_field_type, attributes, DATA, representation,
+ field_index) {
+ DCHECK(wrapped_field_type->IsSmi() || wrapped_field_type->IsWeakCell());
+ }
};
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index 416f5865e2..0f49c3f80e 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -2,12 +2,39 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// var $Object = global.Object;
-// var $Array = global.Array;
+var $regexpExec;
+var $regexpExecNoTests;
+var $regexpLastMatchInfo;
+var $regexpLastMatchInfoOverride;
+var harmony_regexps = false;
+var harmony_unicode_regexps = false;
-var $RegExp = global.RegExp;
+(function() {
+
+%CheckIsBootstrapping();
+
+var GlobalRegExp = global.RegExp;
+var GlobalArray = global.Array;
+
+// Property of the builtins object for recording the result of the last
+// regexp match. The property $regexpLastMatchInfo includes the matchIndices
+// array of the last successful regexp match (an array of start/end index
+// pairs for the match and all the captured substrings); the invariant is
+// that there are at least two capture indices. The array also contains
+// the subject string for the last successful match.
+$regexpLastMatchInfo = new InternalPackedArray(
+ 2, // REGEXP_NUMBER_OF_CAPTURES
+ "", // Last subject.
+ UNDEFINED, // Last input - settable with RegExpSetInput.
+ 0, // REGEXP_FIRST_CAPTURE + 0
+ 0 // REGEXP_FIRST_CAPTURE + 1
+);
+
+// Override last match info with an array of actual substrings.
+// Used internally by replace regexp with function.
+// The array has the format of an "apply" argument for a replacement
+// function.
+$regexpLastMatchInfoOverride = null;
// -------------------------------------------------------------------
@@ -44,7 +71,7 @@ function RegExpConstructor(pattern, flags) {
if (IS_REGEXP(pattern) && IS_UNDEFINED(flags)) {
return pattern;
}
- return new $RegExp(pattern, flags);
+ return new GlobalRegExp(pattern, flags);
}
}
@@ -60,7 +87,7 @@ function RegExpCompileJS(pattern, flags) {
// RegExp.prototype.compile and in the constructor, where they are
// the empty string. For compatibility with JSC, we match their
// behavior.
- if (this == $RegExp.prototype) {
+ if (this == GlobalRegExp.prototype) {
// We don't allow recompiling RegExp.prototype.
throw MakeTypeError('incompatible_method_receiver',
['RegExp.prototype.compile', this]);
@@ -74,8 +101,8 @@ function RegExpCompileJS(pattern, flags) {
function DoRegExpExec(regexp, string, index) {
- var result = %_RegExpExec(regexp, string, index, lastMatchInfo);
- if (result !== null) lastMatchInfoOverride = null;
+ var result = %_RegExpExec(regexp, string, index, $regexpLastMatchInfo);
+ if (result !== null) $regexpLastMatchInfoOverride = null;
return result;
}
@@ -108,9 +135,9 @@ endmacro
function RegExpExecNoTests(regexp, string, start) {
// Must be called with RegExp, string and positive integer as arguments.
- var matchInfo = %_RegExpExec(regexp, string, start, lastMatchInfo);
+ var matchInfo = %_RegExpExec(regexp, string, start, $regexpLastMatchInfo);
if (matchInfo !== null) {
- lastMatchInfoOverride = null;
+ $regexpLastMatchInfoOverride = null;
RETURN_NEW_RESULT_FROM_MATCH_INFO(matchInfo, string);
}
regexp.lastIndex = 0;
@@ -118,7 +145,7 @@ function RegExpExecNoTests(regexp, string, start) {
}
-function RegExpExec(string) {
+function RegExpExecJS(string) {
if (!IS_REGEXP(this)) {
throw MakeTypeError('incompatible_method_receiver',
['RegExp.prototype.exec', this]);
@@ -141,8 +168,8 @@ function RegExpExec(string) {
i = 0;
}
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
+ // matchIndices is either null or the $regexpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(this, string, i, $regexpLastMatchInfo);
if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
@@ -150,9 +177,9 @@ function RegExpExec(string) {
}
// Successful match.
- lastMatchInfoOverride = null;
+ $regexpLastMatchInfoOverride = null;
if (updateLastIndex) {
- this.lastIndex = lastMatchInfo[CAPTURE1];
+ this.lastIndex = $regexpLastMatchInfo[CAPTURE1];
}
RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
}
@@ -184,14 +211,14 @@ function RegExpTest(string) {
this.lastIndex = 0;
return false;
}
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
+ // matchIndices is either null or the $regexpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(this, string, i, $regexpLastMatchInfo);
if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
}
- lastMatchInfoOverride = null;
- this.lastIndex = lastMatchInfo[CAPTURE1];
+ $regexpLastMatchInfoOverride = null;
+ this.lastIndex = $regexpLastMatchInfo[CAPTURE1];
return true;
} else {
// Non-global, non-sticky regexp.
@@ -205,13 +232,13 @@ function RegExpTest(string) {
%_StringCharCodeAt(regexp.source, 2) != 63) { // '?'
regexp = TrimRegExp(regexp);
}
- // matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
+ // matchIndices is either null or the $regexpLastMatchInfo array.
+ var matchIndices = %_RegExpExec(regexp, string, 0, $regexpLastMatchInfo);
if (IS_NULL(matchIndices)) {
this.lastIndex = 0;
return false;
}
- lastMatchInfoOverride = null;
+ $regexpLastMatchInfoOverride = null;
return true;
}
}
@@ -220,9 +247,9 @@ function TrimRegExp(regexp) {
if (!%_ObjectEquals(regexp_key, regexp)) {
regexp_key = regexp;
regexp_val =
- new $RegExp(%_SubString(regexp.source, 2, regexp.source.length),
- (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
- : regexp.multiline ? "m" : ""));
+ new GlobalRegExp(%_SubString(regexp.source, 2, regexp.source.length),
+ (regexp.ignoreCase ? regexp.multiline ? "im" : "i"
+ : regexp.multiline ? "m" : ""));
}
return regexp_val;
}
@@ -248,30 +275,30 @@ function RegExpToString() {
// on the captures array of the last successful match and the subject string
// of the last successful match.
function RegExpGetLastMatch() {
- if (lastMatchInfoOverride !== null) {
- return OVERRIDE_MATCH(lastMatchInfoOverride);
+ if ($regexpLastMatchInfoOverride !== null) {
+ return OVERRIDE_MATCH($regexpLastMatchInfoOverride);
}
- var regExpSubject = LAST_SUBJECT(lastMatchInfo);
+ var regExpSubject = LAST_SUBJECT($regexpLastMatchInfo);
return %_SubString(regExpSubject,
- lastMatchInfo[CAPTURE0],
- lastMatchInfo[CAPTURE1]);
+ $regexpLastMatchInfo[CAPTURE0],
+ $regexpLastMatchInfo[CAPTURE1]);
}
function RegExpGetLastParen() {
- if (lastMatchInfoOverride) {
- var override = lastMatchInfoOverride;
+ if ($regexpLastMatchInfoOverride) {
+ var override = $regexpLastMatchInfoOverride;
if (override.length <= 3) return '';
return override[override.length - 3];
}
- var length = NUMBER_OF_CAPTURES(lastMatchInfo);
+ var length = NUMBER_OF_CAPTURES($regexpLastMatchInfo);
if (length <= 2) return ''; // There were no captures.
// We match the SpiderMonkey behavior: return the substring defined by the
// last pair (after the first pair) of elements of the capture array even if
// it is empty.
- var regExpSubject = LAST_SUBJECT(lastMatchInfo);
- var start = lastMatchInfo[CAPTURE(length - 2)];
- var end = lastMatchInfo[CAPTURE(length - 1)];
+ var regExpSubject = LAST_SUBJECT($regexpLastMatchInfo);
+ var start = $regexpLastMatchInfo[CAPTURE(length - 2)];
+ var end = $regexpLastMatchInfo[CAPTURE(length - 1)];
if (start != -1 && end != -1) {
return %_SubString(regExpSubject, start, end);
}
@@ -282,11 +309,11 @@ function RegExpGetLastParen() {
function RegExpGetLeftContext() {
var start_index;
var subject;
- if (!lastMatchInfoOverride) {
- start_index = lastMatchInfo[CAPTURE0];
- subject = LAST_SUBJECT(lastMatchInfo);
+ if (!$regexpLastMatchInfoOverride) {
+ start_index = $regexpLastMatchInfo[CAPTURE0];
+ subject = LAST_SUBJECT($regexpLastMatchInfo);
} else {
- var override = lastMatchInfoOverride;
+ var override = $regexpLastMatchInfoOverride;
start_index = OVERRIDE_POS(override);
subject = OVERRIDE_SUBJECT(override);
}
@@ -297,11 +324,11 @@ function RegExpGetLeftContext() {
function RegExpGetRightContext() {
var start_index;
var subject;
- if (!lastMatchInfoOverride) {
- start_index = lastMatchInfo[CAPTURE1];
- subject = LAST_SUBJECT(lastMatchInfo);
+ if (!$regexpLastMatchInfoOverride) {
+ start_index = $regexpLastMatchInfo[CAPTURE1];
+ subject = LAST_SUBJECT($regexpLastMatchInfo);
} else {
- var override = lastMatchInfoOverride;
+ var override = $regexpLastMatchInfoOverride;
subject = OVERRIDE_SUBJECT(override);
var match = OVERRIDE_MATCH(override);
start_index = OVERRIDE_POS(override) + match.length;
@@ -314,126 +341,106 @@ function RegExpGetRightContext() {
// successful match, or ''. The function RegExpMakeCaptureGetter will be
// called with indices from 1 to 9.
function RegExpMakeCaptureGetter(n) {
- return function() {
- if (lastMatchInfoOverride) {
- if (n < lastMatchInfoOverride.length - 2) {
- return OVERRIDE_CAPTURE(lastMatchInfoOverride, n);
+ return function foo() {
+ if ($regexpLastMatchInfoOverride) {
+ if (n < $regexpLastMatchInfoOverride.length - 2) {
+ return OVERRIDE_CAPTURE($regexpLastMatchInfoOverride, n);
}
return '';
}
var index = n * 2;
- if (index >= NUMBER_OF_CAPTURES(lastMatchInfo)) return '';
- var matchStart = lastMatchInfo[CAPTURE(index)];
- var matchEnd = lastMatchInfo[CAPTURE(index + 1)];
+ if (index >= NUMBER_OF_CAPTURES($regexpLastMatchInfo)) return '';
+ var matchStart = $regexpLastMatchInfo[CAPTURE(index)];
+ var matchEnd = $regexpLastMatchInfo[CAPTURE(index + 1)];
if (matchStart == -1 || matchEnd == -1) return '';
- return %_SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd);
+ return %_SubString(LAST_SUBJECT($regexpLastMatchInfo), matchStart, matchEnd);
};
}
-
-// Property of the builtins object for recording the result of the last
-// regexp match. The property lastMatchInfo includes the matchIndices
-// array of the last successful regexp match (an array of start/end index
-// pairs for the match and all the captured substrings), the invariant is
-// that there are at least two capture indeces. The array also contains
-// the subject string for the last successful match.
-var lastMatchInfo = new InternalPackedArray(
- 2, // REGEXP_NUMBER_OF_CAPTURES
- "", // Last subject.
- UNDEFINED, // Last input - settable with RegExpSetInput.
- 0, // REGEXP_FIRST_CAPTURE + 0
- 0 // REGEXP_FIRST_CAPTURE + 1
-);
-
-// Override last match info with an array of actual substrings.
-// Used internally by replace regexp with function.
-// The array has the format of an "apply" argument for a replacement
-// function.
-var lastMatchInfoOverride = null;
-
// -------------------------------------------------------------------
-function SetUpRegExp() {
- %CheckIsBootstrapping();
- %FunctionSetInstanceClassName($RegExp, 'RegExp');
- %AddNamedProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
- %SetCode($RegExp, RegExpConstructor);
-
- InstallFunctions($RegExp.prototype, DONT_ENUM, $Array(
- "exec", RegExpExec,
- "test", RegExpTest,
- "toString", RegExpToString,
- "compile", RegExpCompileJS
- ));
-
- // The length of compile is 1 in SpiderMonkey.
- %FunctionSetLength($RegExp.prototype.compile, 1);
-
- // The properties `input` and `$_` are aliases for each other. When this
- // value is set the value it is set to is coerced to a string.
- // Getter and setter for the input.
- var RegExpGetInput = function() {
- var regExpInput = LAST_INPUT(lastMatchInfo);
- return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
- };
- var RegExpSetInput = function(string) {
- LAST_INPUT(lastMatchInfo) = ToString(string);
- };
-
- %OptimizeObjectForAddingMultipleProperties($RegExp, 22);
- %DefineAccessorPropertyUnchecked($RegExp, 'input', RegExpGetInput,
- RegExpSetInput, DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, '$_', RegExpGetInput,
- RegExpSetInput, DONT_ENUM | DONT_DELETE);
-
- // The properties multiline and $* are aliases for each other. When this
- // value is set in SpiderMonkey, the value it is set to is coerced to a
- // boolean. We mimic that behavior with a slight difference: in SpiderMonkey
- // the value of the expression 'RegExp.multiline = null' (for instance) is the
- // boolean false (i.e., the value after coercion), while in V8 it is the value
- // null (i.e., the value before coercion).
-
- // Getter and setter for multiline.
- var multiline = false;
- var RegExpGetMultiline = function() { return multiline; };
- var RegExpSetMultiline = function(flag) { multiline = flag ? true : false; };
-
- %DefineAccessorPropertyUnchecked($RegExp, 'multiline', RegExpGetMultiline,
- RegExpSetMultiline, DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, '$*', RegExpGetMultiline,
- RegExpSetMultiline,
- DONT_ENUM | DONT_DELETE);
-
-
- var NoOpSetter = function(ignored) {};
-
-
- // Static properties set by a successful match.
- %DefineAccessorPropertyUnchecked($RegExp, 'lastMatch', RegExpGetLastMatch,
- NoOpSetter, DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, '$&', RegExpGetLastMatch,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, 'lastParen', RegExpGetLastParen,
- NoOpSetter, DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, '$+', RegExpGetLastParen,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, 'leftContext',
- RegExpGetLeftContext, NoOpSetter,
+%FunctionSetInstanceClassName(GlobalRegExp, 'RegExp');
+%AddNamedProperty(
+ GlobalRegExp.prototype, 'constructor', GlobalRegExp, DONT_ENUM);
+%SetCode(GlobalRegExp, RegExpConstructor);
+
+InstallFunctions(GlobalRegExp.prototype, DONT_ENUM, GlobalArray(
+ "exec", RegExpExecJS,
+ "test", RegExpTest,
+ "toString", RegExpToString,
+ "compile", RegExpCompileJS
+));
+
+// The length of compile is 1 in SpiderMonkey.
+%FunctionSetLength(GlobalRegExp.prototype.compile, 1);
+
+// The properties `input` and `$_` are aliases for each other. When this
+// value is set, the value it is set to is coerced to a string.
+// Getter and setter for the input.
+var RegExpGetInput = function() {
+ var regExpInput = LAST_INPUT($regexpLastMatchInfo);
+ return IS_UNDEFINED(regExpInput) ? "" : regExpInput;
+};
+var RegExpSetInput = function(string) {
+ LAST_INPUT($regexpLastMatchInfo) = ToString(string);
+};
+
+%OptimizeObjectForAddingMultipleProperties(GlobalRegExp, 22);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, 'input', RegExpGetInput,
+ RegExpSetInput, DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, '$_', RegExpGetInput,
+ RegExpSetInput, DONT_ENUM | DONT_DELETE);
+
+// The properties multiline and $* are aliases for each other. When this
+// value is set in SpiderMonkey, the value it is set to is coerced to a
+// boolean. We mimic that behavior with a slight difference: in SpiderMonkey
+// the value of the expression 'RegExp.multiline = null' (for instance) is the
+// boolean false (i.e., the value after coercion), while in V8 it is the value
+// null (i.e., the value before coercion).
+
+// Getter and setter for multiline.
+var multiline = false;
+var RegExpGetMultiline = function() { return multiline; };
+var RegExpSetMultiline = function(flag) { multiline = flag ? true : false; };
+
+%DefineAccessorPropertyUnchecked(GlobalRegExp, 'multiline', RegExpGetMultiline,
+ RegExpSetMultiline, DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, '$*', RegExpGetMultiline,
+ RegExpSetMultiline,
+ DONT_ENUM | DONT_DELETE);
+
+
+var NoOpSetter = function(ignored) {};
+
+
+// Static properties set by a successful match.
+%DefineAccessorPropertyUnchecked(GlobalRegExp, 'lastMatch', RegExpGetLastMatch,
+ NoOpSetter, DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, '$&', RegExpGetLastMatch,
+ NoOpSetter, DONT_ENUM | DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, 'lastParen', RegExpGetLastParen,
+ NoOpSetter, DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, '$+', RegExpGetLastParen,
+ NoOpSetter, DONT_ENUM | DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, 'leftContext',
+ RegExpGetLeftContext, NoOpSetter,
+ DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, '$`', RegExpGetLeftContext,
+ NoOpSetter, DONT_ENUM | DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, 'rightContext',
+ RegExpGetRightContext, NoOpSetter,
+ DONT_DELETE);
+%DefineAccessorPropertyUnchecked(GlobalRegExp, "$'", RegExpGetRightContext,
+ NoOpSetter, DONT_ENUM | DONT_DELETE);
+
+for (var i = 1; i < 10; ++i) {
+ %DefineAccessorPropertyUnchecked(GlobalRegExp, '$' + i,
+ RegExpMakeCaptureGetter(i), NoOpSetter,
DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, '$`', RegExpGetLeftContext,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, 'rightContext',
- RegExpGetRightContext, NoOpSetter,
- DONT_DELETE);
- %DefineAccessorPropertyUnchecked($RegExp, "$'", RegExpGetRightContext,
- NoOpSetter, DONT_ENUM | DONT_DELETE);
-
- for (var i = 1; i < 10; ++i) {
- %DefineAccessorPropertyUnchecked($RegExp, '$' + i,
- RegExpMakeCaptureGetter(i), NoOpSetter,
- DONT_DELETE);
- }
- %ToFastProperties($RegExp);
}
+%ToFastProperties(GlobalRegExp);
+
+$regexpExecNoTests = RegExpExecNoTests;
+$regexpExec = DoRegExpExec;
-SetUpRegExp();
+})();
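
The rewrite above is mostly structural: setup now runs inside a bootstrapping
IIFE instead of a SetUpRegExp() call, the last-match state moves to the top of
the file, and $regexpExec, $regexpExecNoTests and $regexpLastMatchInfo are
exported for use by other natives. The legacy static accessors it installs
keep their usual behavior; for reference:

    /(\w+) (\w+)/.exec("John Smith");
    RegExp.lastMatch;     // "John Smith"  (alias RegExp["$&"])
    RegExp.$1;            // "John"
    RegExp.$2;            // "Smith"
    RegExp.leftContext;   // ""            (alias RegExp["$`"])
    RegExp.input;         // "John Smith"  (alias RegExp.$_)
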
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index c81950e8ed..9a5a8709b2 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -4,10 +4,9 @@
#include "src/v8.h"
-#include "src/rewriter.h"
-
#include "src/ast.h"
-#include "src/compiler.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
#include "src/scopes.h"
namespace v8 {
@@ -222,7 +221,7 @@ EXPRESSION_NODE_LIST(DEF_VISIT)
// Assumes code has been parsed. Mutates the AST, so the AST should not
// continue to be used in the case of failure.
-bool Rewriter::Rewrite(CompilationInfo* info) {
+bool Rewriter::Rewrite(ParseInfo* info) {
FunctionLiteral* function = info->function();
DCHECK(function != NULL);
Scope* scope = function->scope();
diff --git a/deps/v8/src/rewriter.h b/deps/v8/src/rewriter.h
index 0423802bad..b283a55ce0 100644
--- a/deps/v8/src/rewriter.h
+++ b/deps/v8/src/rewriter.h
@@ -8,7 +8,7 @@
namespace v8 {
namespace internal {
-class CompilationInfo;
+class ParseInfo;
class Rewriter {
public:
@@ -18,7 +18,7 @@ class Rewriter {
//
// Assumes code has been parsed and scopes have been analyzed. Mutates the
// AST, so the AST should not continue to be used in the case of failure.
- static bool Rewrite(CompilationInfo* info);
+ static bool Rewrite(ParseInfo* info);
};
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 7d82dfa846..e4cb4ff312 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -418,7 +418,7 @@ function APPLY_PREPARE(args) {
// that takes care of more eventualities.
if (IS_ARRAY(args)) {
length = args.length;
- if (%_IsSmi(length) && length >= 0 && length < 0x800000 &&
+ if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength &&
IS_SPEC_FUNCTION(this)) {
return length;
}
@@ -429,7 +429,7 @@ function APPLY_PREPARE(args) {
// We can handle any number of apply arguments if the stack is
// big enough, but sanity check the value to avoid overflow when
// multiplying with pointer size.
- if (length > 0x800000) {
+ if (length > kSafeArgumentsLength) {
throw %MakeRangeError('stack_overflow', []);
}
@@ -449,6 +449,93 @@ function APPLY_PREPARE(args) {
}
+function REFLECT_APPLY_PREPARE(args) {
+ var length;
+ // First check whether length is a positive Smi and args is an
+ // array. This is the fast case. If this fails, we do the slow case
+ // that takes care of more eventualities.
+ if (IS_ARRAY(args)) {
+ length = args.length;
+ if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength &&
+ IS_SPEC_FUNCTION(this)) {
+ return length;
+ }
+ }
+
+ if (!IS_SPEC_FUNCTION(this)) {
+ throw %MakeTypeError('called_non_callable', [ %ToString(this) ]);
+ }
+
+ if (!IS_SPEC_OBJECT(args)) {
+ throw %MakeTypeError('reflect_apply_wrong_args', [ ]);
+ }
+
+ length = %ToLength(args.length);
+
+ // We can handle any number of apply arguments if the stack is
+ // big enough, but sanity check the value to avoid overflow when
+ // multiplying with pointer size.
+ if (length > kSafeArgumentsLength) {
+ throw %MakeRangeError('stack_overflow', []);
+ }
+
+ // Return the length, which is the number of arguments to copy to the
+ // stack. It is guaranteed to be a small integer at this point.
+ return length;
+}
+
+
+function REFLECT_CONSTRUCT_PREPARE(args, newTarget) {
+ var length;
+ var ctorOk = IS_SPEC_FUNCTION(this) && %IsConstructor(this);
+ var newTargetOk = IS_SPEC_FUNCTION(newTarget) && %IsConstructor(newTarget);
+
+ // First check whether length is a positive Smi and args is an
+ // array. This is the fast case. If this fails, we do the slow case
+ // that takes care of more eventualities.
+ if (IS_ARRAY(args)) {
+ length = args.length;
+ if (%_IsSmi(length) && length >= 0 && length < kSafeArgumentsLength &&
+ ctorOk && newTargetOk) {
+ return length;
+ }
+ }
+
+ if (!ctorOk) {
+ if (!IS_SPEC_FUNCTION(this)) {
+ throw %MakeTypeError('called_non_callable', [ %ToString(this) ]);
+ } else {
+ throw %MakeTypeError('not_constructor', [ %ToString(this) ]);
+ }
+ }
+
+ if (!newTargetOk) {
+ if (!IS_SPEC_FUNCTION(newTarget)) {
+ throw %MakeTypeError('called_non_callable', [ %ToString(newTarget) ]);
+ } else {
+ throw %MakeTypeError('not_constructor', [ %ToString(newTarget) ]);
+ }
+ }
+
+ if (!IS_SPEC_OBJECT(args)) {
+ throw %MakeTypeError('reflect_construct_wrong_args', [ ]);
+ }
+
+ length = %ToLength(args.length);
+
+ // We can handle any number of apply arguments if the stack is
+ // big enough, but sanity check the value to avoid overflow when
+ // multiplying with pointer size.
+ if (length > kSafeArgumentsLength) {
+ throw %MakeRangeError('stack_overflow', []);
+ }
+
+ // Return the length, which is the number of arguments to copy to the
+ // stack. It is guaranteed to be a small integer at this point.
+ return length;
+}
+
+
function STACK_OVERFLOW(length) {
throw %MakeRangeError('stack_overflow', []);
}
@@ -694,3 +781,14 @@ function ToPositiveInteger(x, rangeErrorName) {
// that is cloned when running the code. It is essential that the
// boilerplate gets the right prototype.
%FunctionSetPrototype($Array, new $Array(0));
+
+
+/* -----------------------------------------------
+ - - - J a v a S c r i p t S t u b s - - -
+ -----------------------------------------------
+*/
+
+function STRING_LENGTH_STUB(name) {
+ var receiver = this; // implicit first parameter
+ return %_StringGetLength(%_JSValueGetValue(receiver));
+}
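
REFLECT_APPLY_PREPARE and REFLECT_CONSTRUCT_PREPARE parallel APPLY_PREPARE but
add the Reflect-specific checks (callable target, constructor newTarget,
object argument list) before anything is copied to the stack, sharing the
kSafeArgumentsLength bound that also replaces the literal 0x800000 above. The
observable behavior, assuming the Reflect builtins wired to these stubs are
enabled in the build:

    Reflect.apply(Math.max, undefined, [1, 3, 2]);       // 3
    Reflect.apply(42, undefined, []);                    // TypeError: not callable
    Reflect.apply(Math.max, undefined, "nope");          // TypeError: bad argument list
    Reflect.construct(Date, [2015, 4]) instanceof Date;  // true
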
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 6814385183..010c186070 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -444,8 +444,8 @@ static bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
for (uint32_t i = 0; i < length; ++i) {
HandleScope loop_scope(isolate);
Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
- if (!maybe.has_value) return false;
- if (maybe.value) {
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
Handle<Object> element_value;
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
isolate, element_value,
@@ -511,8 +511,8 @@ static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
visitor->visit(j, element_value);
} else {
Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
- if (!maybe.has_value) return false;
- if (maybe.value) {
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
@@ -547,8 +547,8 @@ static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
visitor->visit(j, element_value);
} else {
Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
- if (!maybe.has_value) return false;
- if (maybe.value) {
+ if (!maybe.IsJust()) return false;
+ if (maybe.FromJust()) {
// Call GetElement on receiver, not its prototype, or getters won't
// have the correct receiver.
Handle<Object> element_value;
@@ -1228,7 +1228,8 @@ RUNTIME_FUNCTION(Runtime_HasComplexElements) {
return isolate->heap()->true_value();
}
if (!current->HasDictionaryElements()) continue;
- if (current->element_dictionary()->HasComplexElements()) {
+ if (current->element_dictionary()
+ ->HasComplexElements<DictionaryEntryType::kObjects>()) {
return isolate->heap()->true_value();
}
}
@@ -1322,7 +1323,7 @@ RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInNext) {
}
-RUNTIME_FUNCTION(RuntimeReference_IsArray) {
+RUNTIME_FUNCTION(Runtime_IsArray) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
@@ -1330,23 +1331,26 @@ RUNTIME_FUNCTION(RuntimeReference_IsArray) {
}
-RUNTIME_FUNCTION(RuntimeReference_HasCachedArrayIndex) {
+RUNTIME_FUNCTION(Runtime_HasCachedArrayIndex) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
return isolate->heap()->false_value();
}
-RUNTIME_FUNCTION(RuntimeReference_GetCachedArrayIndex) {
- SealHandleScope shs(isolate);
- DCHECK(args.length() == 1);
- return isolate->heap()->undefined_value();
+RUNTIME_FUNCTION(Runtime_GetCachedArrayIndex) {
+ // This can never be reached, because Runtime_HasCachedArrayIndex always
+ // returns false.
+ UNIMPLEMENTED();
+ return nullptr;
}
-RUNTIME_FUNCTION(RuntimeReference_FastOneByteArrayJoin) {
+RUNTIME_FUNCTION(Runtime_FastOneByteArrayJoin) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
+ // Returning undefined means that this fast path fails and the caller has
+ // to resort to the slow path.
return isolate->heap()->undefined_value();
}
}
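
Beyond the mechanical migration from Maybe<bool>'s has_value/value fields to
the IsJust()/FromJust() accessors, note what the HasElement/GetElement pairing
guards: Array.prototype.concat reads through holes, so elements a sparse array
inherits from its prototype stay observable. A small sketch:

    Array.prototype[1] = "inherited";
    var a = [0, , 2];       // hole at index 1
    a.concat()[1];          // "inherited"
    delete Array.prototype[1];
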
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index e88a76ac9e..67d7e6939a 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -56,6 +56,30 @@ RUNTIME_FUNCTION(Runtime_ThrowArrayNotSubclassableError) {
}
+static Object* ThrowStaticPrototypeError(Isolate* isolate) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate, NewTypeError("static_prototype", HandleVector<Object>(NULL, 0)));
+}
+
+
+RUNTIME_FUNCTION(Runtime_ThrowStaticPrototypeError) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+ return ThrowStaticPrototypeError(isolate);
+}
+
+
+RUNTIME_FUNCTION(Runtime_ThrowIfStaticPrototype) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Name, name, 0);
+ if (Name::Equals(name, isolate->factory()->prototype_string())) {
+ return ThrowStaticPrototypeError(isolate);
+ }
+ return *name;
+}
+
+
RUNTIME_FUNCTION(Runtime_ToMethod) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
@@ -117,7 +141,7 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
Handle<Map> map =
isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
map->SetPrototype(prototype_parent);
- map->set_constructor(*constructor);
+ map->SetConstructor(*constructor);
Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
Handle<String> name_string = name->IsString()
@@ -232,9 +256,8 @@ RUNTIME_FUNCTION(Runtime_ClassGetSourceCode) {
static Object* LoadFromSuper(Isolate* isolate, Handle<Object> receiver,
Handle<JSObject> home_object, Handle<Name> name) {
- if (home_object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(home_object, name, v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_GET);
+ if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ isolate->ReportFailedAccessCheck(home_object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
@@ -252,9 +275,8 @@ static Object* LoadFromSuper(Isolate* isolate, Handle<Object> receiver,
static Object* LoadElementFromSuper(Isolate* isolate, Handle<Object> receiver,
Handle<JSObject> home_object,
uint32_t index) {
- if (home_object->IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(home_object, index, v8::ACCESS_GET)) {
- isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_GET);
+ if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ isolate->ReportFailedAccessCheck(home_object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
@@ -306,9 +328,8 @@ RUNTIME_FUNCTION(Runtime_LoadKeyedFromSuper) {
static Object* StoreToSuper(Isolate* isolate, Handle<JSObject> home_object,
Handle<Object> receiver, Handle<Name> name,
Handle<Object> value, LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(home_object, name, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_SET);
+ if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ isolate->ReportFailedAccessCheck(home_object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
@@ -331,9 +352,8 @@ static Object* StoreElementToSuper(Isolate* isolate,
Handle<Object> receiver, uint32_t index,
Handle<Object> value,
LanguageMode language_mode) {
- if (home_object->IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(home_object, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_SET);
+ if (home_object->IsAccessCheckNeeded() && !isolate->MayAccess(home_object)) {
+ isolate->ReportFailedAccessCheck(home_object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
}
@@ -433,8 +453,8 @@ RUNTIME_FUNCTION(Runtime_HandleStepInForDerivedConstructors) {
}
-RUNTIME_FUNCTION(RuntimeReference_DefaultConstructorCallSuper) {
- UNREACHABLE();
+RUNTIME_FUNCTION(Runtime_DefaultConstructorCallSuper) {
+ UNIMPLEMENTED();
return nullptr;
}
}
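
The new entries enforce the class restriction on static members named
"prototype". A literal spelling is rejected at parse time; the runtime check
covers computed names, which are only known when the class is defined:

    class A { static ['prototype']() {} }  // TypeError when the class is evaluated
    // class B { static prototype() {} }   // SyntaxError, caught by the parser
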
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 0958da13a1..52fe1e7a8b 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -47,7 +47,6 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
- DCHECK(isolate->use_crankshaft());
Compiler::ConcurrencyMode mode =
concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
diff --git a/deps/v8/src/runtime/runtime-date.cc b/deps/v8/src/runtime/runtime-date.cc
index 4caf27de0f..844ca25fd5 100644
--- a/deps/v8/src/runtime/runtime-date.cc
+++ b/deps/v8/src/runtime/runtime-date.cc
@@ -152,6 +152,7 @@ RUNTIME_FUNCTION(Runtime_DateToUTC) {
RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
HandleScope hs(isolate);
DCHECK(args.length() == 0);
+ if (isolate->serializer_enabled()) return isolate->heap()->undefined_value();
if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
Handle<FixedArray> date_cache_version =
isolate->factory()->NewFixedArray(1, TENURED);
@@ -170,7 +171,7 @@ RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
}
-RUNTIME_FUNCTION(RuntimeReference_DateField) {
+RUNTIME_FUNCTION(Runtime_DateField) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_CHECKED(Object, obj, 0);
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index 563e80804f..b4e1fe90c4 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -6,6 +6,7 @@
#include "src/accessors.h"
#include "src/arguments.h"
+#include "src/compiler.h"
#include "src/debug.h"
#include "src/deoptimizer.h"
#include "src/isolate-inl.h"
@@ -53,7 +54,7 @@ RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
}
-RUNTIME_FUNCTION(Runtime_Break) {
+RUNTIME_FUNCTION(Runtime_ScheduleBreak) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
isolate->stack_guard()->RequestDebugBreak();
@@ -71,6 +72,7 @@ static Handle<Object> DebugGetProperty(LookupIterator* it,
case LookupIterator::ACCESS_CHECK:
// Ignore access checks.
break;
+ case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::INTERCEPTOR:
case LookupIterator::JSPROXY:
return it->isolate()->factory()->undefined_value();
@@ -136,7 +138,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
isolate, element_or_char,
Runtime::GetElementOrCharAt(isolate, obj, index));
details->set(0, *element_or_char);
- details->set(1, PropertyDetails(NONE, DATA, 0).AsSmi());
+ details->set(1, PropertyDetails::Empty().AsSmi());
return *isolate->factory()->NewJSArrayWithElements(details);
}
@@ -158,7 +160,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
details->set(0, *value);
// TODO(verwaest): Get rid of this random way of handling interceptors.
PropertyDetails d = it.state() == LookupIterator::INTERCEPTOR
- ? PropertyDetails(NONE, DATA, 0)
+ ? PropertyDetails::Empty()
: it.property_details();
details->set(1, d.AsSmi());
details->set(
@@ -913,8 +915,8 @@ static bool SetLocalVariableValue(Isolate* isolate, JavaScriptFrame* frame,
Handle<JSObject> ext(JSObject::cast(function_context->extension()));
Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
- DCHECK(maybe.has_value);
- if (maybe.value) {
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
// We don't expect this to do anything except replacing
// property value.
Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
@@ -996,8 +998,8 @@ static bool SetClosureVariableValue(Isolate* isolate, Handle<Context> context,
if (context->has_extension()) {
Handle<JSObject> ext(JSObject::cast(context->extension()));
Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
- DCHECK(maybe.has_value);
- if (maybe.value) {
+ DCHECK(maybe.IsJust());
+ if (maybe.FromJust()) {
// We don't expect this to do anything except replacing property value.
Runtime::DefineObjectProperty(ext, variable_name, new_value, NONE)
.Assert();
@@ -1164,18 +1166,19 @@ class ScopeIterator {
if (!ignore_nested_scopes) {
Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared_info);
- // Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
- // pc points to the instruction after the current one, possibly a break
+ // PC points to the instruction after the current one, possibly a break
// location as well. So the "- 1" to exclude it from the search.
- break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
+ Address call_pc = frame->pc() - 1;
+
+ // Find the break point where execution has stopped.
+ BreakLocation location =
+ BreakLocation::FromAddress(debug_info, ALL_BREAK_LOCATIONS, call_pc);
// Within the return sequence at the moment it is not possible to
// get a source position which is consistent with the current scope chain.
// Thus all nested with, catch and block contexts are skipped and we only
// provide the function scope.
- ignore_nested_scopes = break_location_iterator.IsExit();
+ ignore_nested_scopes = location.IsExit();
}
if (ignore_nested_scopes) {
@@ -1197,16 +1200,17 @@ class ScopeIterator {
// Check whether we are in global, eval or function code.
Handle<ScopeInfo> scope_info(shared_info->scope_info());
+ Zone zone;
if (scope_info->scope_type() != FUNCTION_SCOPE &&
scope_info->scope_type() != ARROW_SCOPE) {
// Global or eval code.
- CompilationInfoWithZone info(script);
+ ParseInfo info(&zone, script);
if (scope_info->scope_type() == SCRIPT_SCOPE) {
- info.MarkAsGlobal();
+ info.set_global();
} else {
DCHECK(scope_info->scope_type() == EVAL_SCOPE);
- info.MarkAsEval();
- info.SetContext(Handle<Context>(function_->context()));
+ info.set_eval();
+ info.set_context(Handle<Context>(function_->context()));
}
if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
scope = info.function()->scope();
@@ -1214,7 +1218,7 @@ class ScopeIterator {
RetrieveScopeChain(scope, shared_info);
} else {
// Function code
- CompilationInfoWithZone info(shared_info);
+ ParseInfo info(&zone, function_);
if (Parser::ParseStatic(&info) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
@@ -1548,9 +1552,11 @@ RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
JavaScriptFrameIterator frame_it(isolate, id);
RUNTIME_ASSERT(!frame_it.done());
- JavaScriptFrame* frame = frame_it.frame();
+ List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+ frame_it.frame()->Summarize(&frames);
+ FrameSummary summary = frames.first();
- Handle<JSFunction> fun = Handle<JSFunction>(frame->function());
+ Handle<JSFunction> fun = Handle<JSFunction>(summary.function());
Handle<SharedFunctionInfo> shared = Handle<SharedFunctionInfo>(fun->shared());
if (!isolate->debug()->EnsureDebugInfo(shared, fun)) {
@@ -1559,18 +1565,19 @@ RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
Handle<DebugInfo> debug_info = Debug::GetDebugInfo(shared);
- int len = 0;
- Handle<JSArray> array(isolate->factory()->NewJSArray(10));
- // Find the break point where execution has stopped.
- BreakLocationIterator break_location_iterator(debug_info,
- ALL_BREAK_LOCATIONS);
+ // Find range of break points starting from the break point where execution
+ // has stopped.
+ Address call_pc = summary.pc() - 1;
+ List<BreakLocation> locations;
+ BreakLocation::FromAddressSameStatement(debug_info, ALL_BREAK_LOCATIONS,
+ call_pc, &locations);
- break_location_iterator.FindBreakLocationFromAddress(frame->pc() - 1);
- int current_statement_pos = break_location_iterator.statement_position();
+ Handle<JSArray> array = isolate->factory()->NewJSArray(locations.length());
- while (!break_location_iterator.Done()) {
+ int index = 0;
+ for (BreakLocation location : locations) {
bool accept;
- if (break_location_iterator.pc() > frame->pc()) {
+ if (location.pc() > summary.pc()) {
accept = true;
} else {
StackFrame::Id break_frame_id = isolate->debug()->break_frame_id();
@@ -1587,20 +1594,15 @@ RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
}
}
if (accept) {
- if (break_location_iterator.IsStepInLocation(isolate)) {
- Smi* position_value = Smi::FromInt(break_location_iterator.position());
+ if (location.IsStepInLocation()) {
+ Smi* position_value = Smi::FromInt(location.position());
RETURN_FAILURE_ON_EXCEPTION(
isolate, JSObject::SetElement(
- array, len, Handle<Object>(position_value, isolate),
+ array, index, Handle<Object>(position_value, isolate),
NONE, SLOPPY));
- len++;
+ index++;
}
}
- // Advance iterator.
- break_location_iterator.Next();
- if (current_statement_pos != break_location_iterator.statement_position()) {
- break;
- }
}
return *array;
}
@@ -2113,8 +2115,8 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeArgumentsObject(
if (!function->shared()->is_function()) return target;
Maybe<bool> maybe = JSReceiver::HasOwnProperty(
target, isolate->factory()->arguments_string());
- if (!maybe.has_value) return MaybeHandle<JSObject>();
- if (maybe.value) return target;
+ if (!maybe.IsJust()) return MaybeHandle<JSObject>();
+ if (maybe.FromJust()) return target;
// FunctionGetArguments can't throw an exception.
Handle<JSObject> arguments =
@@ -2204,9 +2206,6 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
StackFrame::Id id = UnwrapFrameId(wrapped_id);
JavaScriptFrameIterator it(isolate, id);
JavaScriptFrame* frame = it.frame();
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
- Handle<SharedFunctionInfo> outer_info(function->shared());
// Traverse the saved contexts chain to find the active context for the
// selected frame.
@@ -2216,16 +2215,29 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
isolate->set_context(*(save->context()));
// Materialize stack locals and the arguments object.
- Handle<JSObject> materialized = NewJSObjectWithNullProto(isolate);
+ Handle<JSObject> materialized;
+ Handle<JSFunction> function;
+ Handle<SharedFunctionInfo> outer_info;
+ Handle<Context> eval_context;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, materialized,
- MaterializeStackLocalsWithFrameInspector(isolate, materialized, function,
- &frame_inspector));
+ // We need to limit the lifetime of the FrameInspector because evaluation can
+ // call arbitrary code and only one FrameInspector can be active at a time.
+ {
+ FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
+ materialized = NewJSObjectWithNullProto(isolate);
+ function = handle(JSFunction::cast(frame_inspector.GetFunction()));
+ outer_info = handle(function->shared());
+ eval_context = handle(Context::cast(frame_inspector.GetContext()));
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, materialized,
- MaterializeArgumentsObject(isolate, materialized, function));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, materialized,
+ MaterializeStackLocalsWithFrameInspector(isolate, materialized,
+ function, &frame_inspector));
+
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, materialized,
+ MaterializeArgumentsObject(isolate, materialized, function));
+ }
// At this point, the lookup chain may look like this:
// [inner context] -> [function stack]+[function context] -> [outer context]
@@ -2242,7 +2254,6 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
// This could cause lookup failures if debug-evaluate creates a closure that
// uses this temporary context chain.
- Handle<Context> eval_context(Context::cast(frame_inspector.GetContext()));
DCHECK(!eval_context.is_null());
Handle<Context> function_context = eval_context;
Handle<Context> outer_context(function->context(), isolate);
@@ -2372,7 +2383,7 @@ static int DebugReferencedBy(HeapIterator* iterator, JSObject* target,
// checked in the context of functions using them.
JSObject* obj = JSObject::cast(heap_obj);
if (obj->IsJSContextExtensionObject() ||
- obj->map()->constructor() == arguments_function) {
+ obj->map()->GetConstructor() == arguments_function) {
continue;
}
@@ -2435,7 +2446,7 @@ RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
// Get the constructor function for context extension and arguments array.
Handle<JSFunction> arguments_function(
- JSFunction::cast(isolate->sloppy_arguments_map()->constructor()));
+ JSFunction::cast(isolate->sloppy_arguments_map()->GetConstructor()));
// Get the number of referencing objects.
int count;
@@ -2483,7 +2494,7 @@ static int DebugConstructedBy(HeapIterator* iterator, JSFunction* constructor,
// Only look at all JSObjects.
if (heap_obj->IsJSObject()) {
JSObject* obj = JSObject::cast(heap_obj);
- if (obj->map()->constructor() == constructor) {
+ if (obj->map()->GetConstructor() == constructor) {
// Valid reference found add to instance array if supplied an update
// count.
if (instances != NULL && count < instances_size) {
@@ -2571,46 +2582,21 @@ RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
}
-RUNTIME_FUNCTION(Runtime_DebugDisassembleFunction) {
- HandleScope scope(isolate);
-#ifdef DEBUG
+RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
+ SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
- // Get the function and make sure it is compiled.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
- return isolate->heap()->exception();
- }
- OFStream os(stdout);
- func->code()->Print(os);
- os << std::endl;
-#endif // DEBUG
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_DebugDisassembleConstructor) {
- HandleScope scope(isolate);
-#ifdef DEBUG
- DCHECK(args.length() == 1);
- // Get the function and make sure it is compiled.
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
- if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
- return isolate->heap()->exception();
- }
- OFStream os(stdout);
- func->shared()->construct_stub()->Print(os);
- os << std::endl;
-#endif // DEBUG
- return isolate->heap()->undefined_value();
+ CONVERT_ARG_CHECKED(JSFunction, f, 0);
+ return f->shared()->inferred_name();
}
-RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
- SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_FunctionGetDebugName) {
+ HandleScope scope(isolate);
DCHECK(args.length() == 1);
- CONVERT_ARG_CHECKED(JSFunction, f, 0);
- return f->shared()->inferred_name();
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
+ return *JSFunction::GetDebugName(f);
}
@@ -2656,21 +2642,15 @@ RUNTIME_FUNCTION(Runtime_GetFunctionCodePositionFromSource) {
// to have a stack with C++ frame in the middle.
RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
HandleScope scope(isolate);
- DCHECK(args.length() == 2);
+ DCHECK(args.length() == 1);
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1);
- MaybeHandle<Object> maybe_result;
- if (without_debugger) {
- maybe_result = Execution::Call(isolate, function,
- handle(function->global_proxy()), 0, NULL);
- } else {
- DebugScope debug_scope(isolate->debug());
- maybe_result = Execution::Call(isolate, function,
- handle(function->global_proxy()), 0, NULL);
- }
+ DebugScope debug_scope(isolate->debug());
Handle<Object> result;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, maybe_result);
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ Execution::Call(isolate, function, handle(function->global_proxy()), 0,
+ NULL));
return *result;
}
@@ -2739,7 +2719,7 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
}
-// Check whether debugger and is about to step into the callback that is passed
+// Check whether the debugger is about to step into the callback that is passed
// to a built-in function such as Array.forEach.
RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
DCHECK(args.length() == 1);
@@ -2749,9 +2729,12 @@ RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
return isolate->heap()->false_value();
}
CONVERT_ARG_CHECKED(Object, callback, 0);
- // We do not step into the callback if it's a builtin or not even a function.
- return isolate->heap()->ToBoolean(callback->IsJSFunction() &&
- !JSFunction::cast(callback)->IsBuiltin());
+ // We do not step into the callback if it's a builtin other than a bound
+ // function, or if it's not even a function.
+ return isolate->heap()->ToBoolean(
+ callback->IsJSFunction() &&
+ (!JSFunction::cast(callback)->IsBuiltin() ||
+ JSFunction::cast(callback)->shared()->bound()));
}
@@ -2776,16 +2759,17 @@ RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
// if we do not leave the builtin. To be able to step into the function
// again, we need to clear the step out at this point.
debug->ClearStepOut();
- debug->FloodWithOneShot(fun);
+ debug->FloodWithOneShotGeneric(fun);
return isolate->heap()->undefined_value();
}
RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
- DCHECK(args.length() == 1);
+ DCHECK(args.length() == 2);
HandleScope scope(isolate);
CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
- isolate->PushPromise(promise);
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 1);
+ isolate->PushPromise(promise, function);
return isolate->heap()->undefined_value();
}
@@ -2816,13 +2800,13 @@ RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
}
-RUNTIME_FUNCTION(RuntimeReference_DebugIsActive) {
+RUNTIME_FUNCTION(Runtime_DebugIsActive) {
SealHandleScope shs(isolate);
return Smi::FromInt(isolate->debug()->is_active());
}
-RUNTIME_FUNCTION(RuntimeReference_DebugBreakInOptimizedCode) {
+RUNTIME_FUNCTION(Runtime_DebugBreakInOptimizedCode) {
UNIMPLEMENTED();
return NULL;
}
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index 5d49b23681..c211e1895f 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -7,6 +7,7 @@
#include "src/accessors.h"
#include "src/arguments.h"
#include "src/compiler.h"
+#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/frames.h"
#include "src/runtime/runtime-utils.h"
@@ -294,10 +295,6 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
int number_of_literals = source->NumberOfLiterals();
Handle<FixedArray> literals =
isolate->factory()->NewFixedArray(number_of_literals, TENURED);
- if (number_of_literals > 0) {
- literals->set(JSFunction::kLiteralNativeContextIndex,
- context->native_context());
- }
target->set_context(*context);
target->set_literals(*literals);
@@ -629,13 +626,13 @@ RUNTIME_FUNCTION(Runtime_GetConstructorDelegate) {
}
-RUNTIME_FUNCTION(RuntimeReference_CallFunction) {
+RUNTIME_FUNCTION(Runtime_CallFunction) {
SealHandleScope shs(isolate);
return __RT_impl_Runtime_Call(args, isolate);
}
-RUNTIME_FUNCTION(RuntimeReference_IsConstructCall) {
+RUNTIME_FUNCTION(Runtime_IsConstructCall) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
JavaScriptFrameIterator it(isolate);
@@ -644,7 +641,7 @@ RUNTIME_FUNCTION(RuntimeReference_IsConstructCall) {
}
-RUNTIME_FUNCTION(RuntimeReference_IsFunction) {
+RUNTIME_FUNCTION(Runtime_IsFunction) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index ff07acd304..d8b084431b 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -31,7 +31,6 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
generator->set_receiver(frame->receiver());
generator->set_continuation(0);
generator->set_operand_stack(isolate->heap()->empty_fixed_array());
- generator->set_stack_handler_index(-1);
return *generator;
}
@@ -39,7 +38,7 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
HandleScope handle_scope(isolate);
- DCHECK(args.length() == 1);
+ DCHECK(args.length() == 1 || args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
JavaScriptFrameIterator stack_iterator(isolate);
@@ -52,28 +51,34 @@ RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
DCHECK_LT(0, generator_object->continuation());
// We expect there to be at least two values on the operand stack: the return
- // value of the yield expression, and the argument to this runtime call.
+ // value of the yield expression, and the arguments to this runtime call.
// Neither of those should be saved.
int operands_count = frame->ComputeOperandsCount();
- DCHECK_GE(operands_count, 2);
- operands_count -= 2;
+ DCHECK_GE(operands_count, 1 + args.length());
+ operands_count -= 1 + args.length();
+
+ // Second argument indicates that we need to patch the handler table because
+ // a delegating yield introduced a try-catch statement at expression level,
+ // hence the operand count was off when we statically computed it.
+ // TODO(mstarzinger): This special case disappears with do-expressions.
+ if (args.length() == 2) {
+ CONVERT_SMI_ARG_CHECKED(handler_index, 1);
+ Handle<Code> code(frame->unchecked_code());
+ Handle<HandlerTable> table(HandlerTable::cast(code->handler_table()));
+ int handler_depth = operands_count - TryBlockConstant::kElementCount;
+ table->SetRangeDepth(handler_index, handler_depth);
+ }
if (operands_count == 0) {
// Although it's semantically harmless to call this function with an
// operands_count of zero, it is also unnecessary.
DCHECK_EQ(generator_object->operand_stack(),
isolate->heap()->empty_fixed_array());
- DCHECK_EQ(generator_object->stack_handler_index(), -1);
- // If there are no operands on the stack, there shouldn't be a handler
- // active either.
- DCHECK(!frame->HasHandler());
} else {
- int stack_handler_index = -1;
Handle<FixedArray> operand_stack =
isolate->factory()->NewFixedArray(operands_count);
- frame->SaveOperandStack(*operand_stack, &stack_handler_index);
+ frame->SaveOperandStack(*operand_stack);
generator_object->set_operand_stack(*operand_stack);
- generator_object->set_stack_handler_index(stack_handler_index);
}
return isolate->heap()->undefined_value();
@@ -115,10 +120,8 @@ RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
FixedArray* operand_stack = generator_object->operand_stack();
int operands_count = operand_stack->length();
if (operands_count != 0) {
- frame->RestoreOperandStack(operand_stack,
- generator_object->stack_handler_index());
+ frame->RestoreOperandStack(operand_stack);
generator_object->set_operand_stack(isolate->heap()->empty_fixed_array());
- generator_object->set_stack_handler_index(-1);
}
JSGeneratorObject::ResumeMode resume_mode =
@@ -213,13 +216,13 @@ RUNTIME_FUNCTION(Runtime_FunctionIsGenerator) {
}
-RUNTIME_FUNCTION(RuntimeReference_GeneratorNext) {
+RUNTIME_FUNCTION(Runtime_GeneratorNext) {
UNREACHABLE(); // Optimization disabled in SetUpGenerators().
return NULL;
}
-RUNTIME_FUNCTION(RuntimeReference_GeneratorThrow) {
+RUNTIME_FUNCTION(Runtime_GeneratorThrow) {
UNREACHABLE(); // Optimization disabled in SetUpGenerators().
return NULL;
}
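
The generator changes drop the per-object stack_handler_index and patch
handler-table depths instead, but the observable contract of
Suspend/ResumeJSGeneratorObject is unchanged: operands that are live when a
yield fires mid-expression are saved with the frame and restored on resume:

    function* g() { return 1 + (yield 2); }
    var it = g();
    it.next();    // { value: 2, done: false }  ("1 +" is parked on the operand stack)
    it.next(40);  // { value: 41, done: true }
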
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 50b61921f5..973c204435 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -26,7 +26,6 @@ RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
RUNTIME_FUNCTION(Runtime_Throw) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
-
return isolate->Throw(args[0]);
}
@@ -34,11 +33,17 @@ RUNTIME_FUNCTION(Runtime_Throw) {
RUNTIME_FUNCTION(Runtime_ReThrow) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
-
return isolate->ReThrow(args[0]);
}
+RUNTIME_FUNCTION(Runtime_FindExceptionHandler) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 0);
+ return isolate->FindHandler();
+}
+
+
RUNTIME_FUNCTION(Runtime_PromoteScheduledException) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
@@ -55,6 +60,16 @@ RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
}
+RUNTIME_FUNCTION(Runtime_ThrowIteratorResultNotAnObject) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError("iterator_result_not_an_object", HandleVector(&value, 1)));
+}
+
+
RUNTIME_FUNCTION(Runtime_PromiseRejectEvent) {
DCHECK(args.length() == 3);
HandleScope scope(isolate);
@@ -164,29 +179,21 @@ RUNTIME_FUNCTION(Runtime_RenderCallSite) {
if (location.start_pos() == -1) return isolate->heap()->empty_string();
Zone zone;
- if (location.function()->shared()->is_function()) {
- CompilationInfo info(location.function(), &zone);
- if (!Parser::ParseStatic(&info)) {
- isolate->clear_pending_exception();
- return isolate->heap()->empty_string();
- }
- CallPrinter printer(isolate, &zone);
- const char* string = printer.Print(info.function(), location.start_pos());
- return *isolate->factory()->NewStringFromAsciiChecked(string);
- }
+ SmartPointer<ParseInfo> info(location.function()->shared()->is_function()
+ ? new ParseInfo(&zone, location.function())
+ : new ParseInfo(&zone, location.script()));
- CompilationInfo info(location.script(), &zone);
- if (!Parser::ParseStatic(&info)) {
+ if (!Parser::ParseStatic(info.get())) {
isolate->clear_pending_exception();
return isolate->heap()->empty_string();
}
CallPrinter printer(isolate, &zone);
- const char* string = printer.Print(info.function(), location.start_pos());
+ const char* string = printer.Print(info->function(), location.start_pos());
return *isolate->factory()->NewStringFromAsciiChecked(string);
}
-RUNTIME_FUNCTION(Runtime_GetFromCache) {
+RUNTIME_FUNCTION(Runtime_GetFromCacheRT) {
SealHandleScope shs(isolate);
// This is only called from codegen, so checks might be more lax.
CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
@@ -304,12 +311,24 @@ RUNTIME_FUNCTION(Runtime_IS_VAR) {
}
-RUNTIME_FUNCTION(RuntimeReference_GetFromCache) {
+RUNTIME_FUNCTION(Runtime_GetFromCache) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_SMI_ARG_CHECKED(id, 0);
args[0] = isolate->native_context()->jsfunction_result_caches()->get(id);
- return __RT_impl_Runtime_GetFromCache(args, isolate);
+ return __RT_impl_Runtime_GetFromCacheRT(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(Runtime_IncrementStatsCounter) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(String, name, 0);
+
+ if (FLAG_native_code_counters) {
+ StatsCounter(isolate, name->ToCString().get()).Increment();
+ }
+ return isolate->heap()->undefined_value();
}
}
} // namespace v8::internal
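
Runtime_GetFromCache is now a thin entry point that forwards to the __RT_impl_ stub generated for its RT-suffixed sibling; the same split recurs later in this patch for StringAdd, SubString, StringCompare, and RegExpConstructResult. A toy sketch of the idiom, with a hypothetical name Foo (RUNTIME_FUNCTION(Runtime_X) expands into a checked entry point plus an __RT_impl_Runtime_X worker):

    RUNTIME_FUNCTION(Runtime_FooRT) {
      HandleScope scope(isolate);     // slow path; may allocate handles
      // ... the actual implementation ...
      return isolate->heap()->undefined_value();
    }

    RUNTIME_FUNCTION(Runtime_Foo) {
      SealHandleScope shs(isolate);   // allocates nothing itself
      return __RT_impl_Runtime_FooRT(args, isolate);  // nested scope re-opens handles
    }
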
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 1ea9c81968..76226d68f5 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -42,14 +42,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
Isolate* isolate, Handle<FixedArray> literals,
Handle<FixedArray> constant_properties, bool should_have_fast_elements,
bool has_function_literal) {
- // Get the native context from the literals array. This is the
- // context in which the function was created and we use the object
- // function from this context to create the object literal. We do
- // not use the object function from the current native context
- // because this might be the object function from another context
- // which we should not have access to.
- Handle<Context> context =
- Handle<Context>(JSFunction::NativeContextFromLiterals(*literals));
+ Handle<Context> context = isolate->native_context();
// In case we have function literals, we want the object to be in
// slow properties mode for now. We don't go in the map cache because
@@ -146,8 +139,7 @@ MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
Isolate* isolate, Handle<FixedArray> literals,
Handle<FixedArray> elements) {
// Create the JSArray.
- Handle<JSFunction> constructor(
- JSFunction::NativeContextFromLiterals(*literals)->array_function());
+ Handle<JSFunction> constructor = isolate->array_function();
PretenureFlag pretenure_flag =
isolate->heap()->InNewSpace(*literals) ? NOT_TENURED : TENURED;
diff --git a/deps/v8/src/runtime/runtime-liveedit.cc b/deps/v8/src/runtime/runtime-liveedit.cc
index b453d15459..e4c644e168 100644
--- a/deps/v8/src/runtime/runtime-liveedit.cc
+++ b/deps/v8/src/runtime/runtime-liveedit.cc
@@ -234,8 +234,9 @@ RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
RUNTIME_ASSERT(shared_array->HasFastElements())
int array_length = Smi::cast(shared_array->length())->value();
for (int i = 0; i < array_length; i++) {
- Handle<Object> element =
- Object::GetElement(isolate, shared_array, i).ToHandleChecked();
+ Handle<Object> element;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, element, Object::GetElement(isolate, shared_array, i));
RUNTIME_ASSERT(
element->IsJSValue() &&
Handle<JSValue>::cast(element)->value()->IsSharedFunctionInfo());
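
The LiveEdit fix swaps ToHandleChecked(), which aborts the process on failure, for ASSIGN_RETURN_FAILURE_ON_EXCEPTION, which propagates the pending exception to the caller instead. The macro roughly expands as follows (a sketch of its behavior, not the literal definition):

    Handle<Object> element;
    if (!Object::GetElement(isolate, shared_array, i).ToHandle(&element)) {
      DCHECK(isolate->has_pending_exception());
      return isolate->heap()->exception();  // surface the failure, don't abort
    }
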
diff --git a/deps/v8/src/runtime/runtime-maths.cc b/deps/v8/src/runtime/runtime-maths.cc
index 68dfa49af8..2941b580f5 100644
--- a/deps/v8/src/runtime/runtime-maths.cc
+++ b/deps/v8/src/runtime/runtime-maths.cc
@@ -34,9 +34,10 @@ RUNTIME_FUNCTION(Runtime_DoubleHi) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- uint64_t integer = double_to_uint64(x);
- integer = (integer >> 32) & 0xFFFFFFFFu;
- return *isolate->factory()->NewNumber(static_cast<int32_t>(integer));
+ uint64_t unsigned64 = double_to_uint64(x);
+ uint32_t unsigned32 = static_cast<uint32_t>(unsigned64 >> 32);
+ int32_t signed32 = bit_cast<int32_t, uint32_t>(unsigned32);
+ return *isolate->factory()->NewNumber(signed32);
}
@@ -44,8 +45,10 @@ RUNTIME_FUNCTION(Runtime_DoubleLo) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- return *isolate->factory()->NewNumber(
- static_cast<int32_t>(double_to_uint64(x) & 0xFFFFFFFFu));
+ uint64_t unsigned64 = double_to_uint64(x);
+ uint32_t unsigned32 = static_cast<uint32_t>(unsigned64);
+ int32_t signed32 = bit_cast<int32_t, uint32_t>(unsigned32);
+ return *isolate->factory()->NewNumber(signed32);
}
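
Both hunks above now route the 32-bit halves through bit_cast, making the unsigned-to-signed reinterpretation explicit instead of relying on a static_cast of a possibly out-of-range value. A standalone sketch of the technique (bit_cast is V8's checked memcpy-based cast, equivalent in spirit to C++20 std::bit_cast):

    uint64_t bits = bit_cast<uint64_t>(3.14);           // raw IEEE-754 pattern
    uint32_t hi_u = static_cast<uint32_t>(bits >> 32);  // sign/exponent/mantissa top
    uint32_t lo_u = static_cast<uint32_t>(bits);        // mantissa bottom
    int32_t hi = bit_cast<int32_t, uint32_t>(hi_u);     // reinterpret bits, no value check
    int32_t lo = bit_cast<int32_t, uint32_t>(lo_u);
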
@@ -109,7 +112,18 @@ RUNTIME_FUNCTION(Runtime_MathExpRT) {
}
-RUNTIME_FUNCTION(Runtime_MathFloorRT) {
+RUNTIME_FUNCTION(Runtime_MathClz32) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ isolate->counters()->math_clz32()->Increment();
+
+ CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
+ return *isolate->factory()->NewNumberFromUint(
+ base::bits::CountLeadingZeros32(x));
+}
+
+
+RUNTIME_FUNCTION(Runtime_MathFloor) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
isolate->counters()->math_floor()->Increment();
@@ -204,7 +218,7 @@ RUNTIME_FUNCTION(Runtime_RoundNumber) {
}
-RUNTIME_FUNCTION(Runtime_MathSqrtRT) {
+RUNTIME_FUNCTION(Runtime_MathSqrt) {
HandleScope scope(isolate);
DCHECK(args.length() == 1);
isolate->counters()->math_sqrt()->Increment();
@@ -224,13 +238,13 @@ RUNTIME_FUNCTION(Runtime_MathFround) {
}
-RUNTIME_FUNCTION(RuntimeReference_MathPow) {
+RUNTIME_FUNCTION(Runtime_MathPow) {
SealHandleScope shs(isolate);
return __RT_impl_Runtime_MathPowSlow(args, isolate);
}
-RUNTIME_FUNCTION(RuntimeReference_IsMinusZero) {
+RUNTIME_FUNCTION(Runtime_IsMinusZero) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
diff --git a/deps/v8/src/runtime/runtime-numbers.cc b/deps/v8/src/runtime/runtime-numbers.cc
index a7a15e476b..36ca87bc07 100644
--- a/deps/v8/src/runtime/runtime-numbers.cc
+++ b/deps/v8/src/runtime/runtime-numbers.cc
@@ -572,13 +572,13 @@ RUNTIME_FUNCTION(Runtime_MaxSmi) {
}
-RUNTIME_FUNCTION(RuntimeReference_NumberToString) {
+RUNTIME_FUNCTION(Runtime_NumberToString) {
SealHandleScope shs(isolate);
return __RT_impl_Runtime_NumberToStringRT(args, isolate);
}
-RUNTIME_FUNCTION(RuntimeReference_IsSmi) {
+RUNTIME_FUNCTION(Runtime_IsSmi) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
@@ -586,7 +586,7 @@ RUNTIME_FUNCTION(RuntimeReference_IsSmi) {
}
-RUNTIME_FUNCTION(RuntimeReference_IsNonNegativeSmi) {
+RUNTIME_FUNCTION(Runtime_IsNonNegativeSmi) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 96d9331038..c387b370fa 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -249,12 +249,10 @@ MaybeHandle<Object> Runtime::GetPrototype(Isolate* isolate,
PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
do {
if (PrototypeIterator::GetCurrent(iter)->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
- isolate->factory()->proto_string(), v8::ACCESS_GET)) {
+ !isolate->MayAccess(
+ Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)))) {
isolate->ReportFailedAccessCheck(
- Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
- v8::ACCESS_GET);
+ Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
}
@@ -297,10 +295,8 @@ RUNTIME_FUNCTION(Runtime_SetPrototype) {
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
- if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(obj, isolate->factory()->proto_string(),
- v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(obj, v8::ACCESS_SET);
+ if (obj->IsAccessCheckNeeded() && !isolate->MayAccess(obj)) {
+ isolate->ReportFailedAccessCheck(obj);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return isolate->heap()->undefined_value();
}
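
This patch consistently collapses the old (object, name, access-type) access-check triple into a single MayAccess(object) call; the same substitution appears again below in GetOwnPropertyNames, OwnKeys, and DefineDataPropertyUnchecked. The resulting preamble is uniform across call sites:

    if (obj->IsAccessCheckNeeded() && !isolate->MayAccess(obj)) {
      isolate->ReportFailedAccessCheck(obj);
      RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
      return isolate->heap()->undefined_value();  // callers vary the failure value
    }
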
@@ -372,8 +368,8 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
// Get attributes.
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnElementAttribute(obj, index);
- if (!maybe.has_value) return MaybeHandle<Object>();
- attrs = maybe.value;
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ attrs = maybe.FromJust();
if (attrs == ABSENT) return factory->undefined_value();
// Get AccessorPair if present.
@@ -389,8 +385,8 @@ MUST_USE_RESULT static MaybeHandle<Object> GetOwnProperty(Isolate* isolate,
// Get attributes.
LookupIterator it(obj, name, LookupIterator::HIDDEN);
Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
- if (!maybe.has_value) return MaybeHandle<Object>();
- attrs = maybe.value;
+ if (!maybe.IsJust()) return MaybeHandle<Object>();
+ attrs = maybe.FromJust();
if (attrs == ABSENT) return factory->undefined_value();
// Get AccessorPair if present.
@@ -609,6 +605,7 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
(dictionary->DetailsAt(entry).type() == DATA)) {
Object* value = dictionary->ValueAt(entry);
if (!receiver->IsGlobalObject()) return value;
+ DCHECK(value->IsPropertyCell());
value = PropertyCell::cast(value)->value();
if (!value->IsTheHole()) return value;
// If value is the hole (meaning, absent) do the general lookup.
@@ -671,7 +668,7 @@ RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
DCHECK(!key->ToArrayIndex(&index));
LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.has_value) return isolate->heap()->exception();
+ if (!maybe.IsJust()) return isolate->heap()->exception();
RUNTIME_ASSERT(!it.IsFound());
#endif
@@ -740,8 +737,8 @@ static Object* HasOwnPropertyImplementation(Isolate* isolate,
Handle<JSObject> object,
Handle<Name> key) {
Maybe<bool> maybe = JSReceiver::HasOwnProperty(object, key);
- if (!maybe.has_value) return isolate->heap()->exception();
- if (maybe.value) return isolate->heap()->true_value();
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.FromJust()) return isolate->heap()->true_value();
// Handle hidden prototypes. If there's a hidden prototype above this thing
// then we have to check it for properties, because they are supposed to
// look like they are on this object.
@@ -776,15 +773,15 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
// Fast case: either the key is a real named property or it is not
// an array index and there are no interceptors or hidden
// prototypes.
- Maybe<bool> maybe;
+ Maybe<bool> maybe = Nothing<bool>();
if (key_is_array_index) {
maybe = JSObject::HasOwnElement(js_obj, index);
} else {
maybe = JSObject::HasRealNamedProperty(js_obj, key);
}
- if (!maybe.has_value) return isolate->heap()->exception();
+ if (!maybe.IsJust()) return isolate->heap()->exception();
DCHECK(!isolate->has_pending_exception());
- if (maybe.value) {
+ if (maybe.FromJust()) {
return isolate->heap()->true_value();
}
Map* map = js_obj->map();
@@ -813,8 +810,8 @@ RUNTIME_FUNCTION(Runtime_HasProperty) {
CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
Maybe<bool> maybe = JSReceiver::HasProperty(receiver, key);
- if (!maybe.has_value) return isolate->heap()->exception();
- return isolate->heap()->ToBoolean(maybe.value);
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ return isolate->heap()->ToBoolean(maybe.FromJust());
}
@@ -825,8 +822,8 @@ RUNTIME_FUNCTION(Runtime_HasElement) {
CONVERT_SMI_ARG_CHECKED(index, 1);
Maybe<bool> maybe = JSReceiver::HasElement(receiver, index);
- if (!maybe.has_value) return isolate->heap()->exception();
- return isolate->heap()->ToBoolean(maybe.value);
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ return isolate->heap()->ToBoolean(maybe.FromJust());
}
@@ -839,9 +836,9 @@ RUNTIME_FUNCTION(Runtime_IsPropertyEnumerable) {
Maybe<PropertyAttributes> maybe =
JSReceiver::GetOwnPropertyAttributes(object, key);
- if (!maybe.has_value) return isolate->heap()->exception();
- if (maybe.value == ABSENT) maybe.value = DONT_ENUM;
- return isolate->heap()->ToBoolean((maybe.value & DONT_ENUM) == 0);
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ if (maybe.FromJust() == ABSENT) maybe = Just(DONT_ENUM);
+ return isolate->heap()->ToBoolean((maybe.FromJust() & DONT_ENUM) == 0);
}
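
The repeated has_value/value edits in this file are a mechanical migration to the richer Maybe<T> API. The correspondence, collected in one place (names exactly as used in the hunks):

    Maybe<bool> maybe = Nothing<bool>();   // explicit empty state (was: default-init)
    maybe = Just(true);                    // a present value
    if (!maybe.IsJust()) {                 // was: !maybe.has_value
      return isolate->heap()->exception();
    }
    bool value = maybe.FromJust();         // was: maybe.value; DCHECKs on Nothing
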
@@ -917,10 +914,8 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
// real global object.
if (obj->IsJSGlobalProxy()) {
// Only collect names if access is permitted.
- if (obj->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(obj, isolate->factory()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(obj, v8::ACCESS_KEYS);
+ if (obj->IsAccessCheckNeeded() && !isolate->MayAccess(obj)) {
+ isolate->ReportFailedAccessCheck(obj);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -941,11 +936,8 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
Handle<JSObject> jsproto =
Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
// Only collect names if access is permitted.
- if (jsproto->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(jsproto,
- isolate->factory()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(jsproto, v8::ACCESS_KEYS);
+ if (jsproto->IsAccessCheckNeeded() && !isolate->MayAccess(jsproto)) {
+ isolate->ReportFailedAccessCheck(jsproto);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -1094,10 +1086,8 @@ RUNTIME_FUNCTION(Runtime_OwnKeys) {
if (object->IsJSGlobalProxy()) {
// Do access checks before going to the global object.
- if (object->IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(object, isolate->factory()->undefined_value(),
- v8::ACCESS_KEYS)) {
- isolate->ReportFailedAccessCheck(object, v8::ACCESS_KEYS);
+ if (object->IsAccessCheckNeeded() && !isolate->MayAccess(object)) {
+ isolate->ReportFailedAccessCheck(object);
RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
return *isolate->factory()->NewJSArray(0);
}
@@ -1442,7 +1432,7 @@ RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
LookupIterator it(js_object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
if (it.IsFound() && it.state() == LookupIterator::ACCESS_CHECK) {
- if (!isolate->MayNamedAccess(js_object, name, v8::ACCESS_SET)) {
+ if (!isolate->MayAccess(js_object)) {
return isolate->heap()->undefined_value();
}
it.Next();
@@ -1488,7 +1478,7 @@ RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
}
-RUNTIME_FUNCTION(RuntimeReference_ValueOf) {
+RUNTIME_FUNCTION(Runtime_ValueOf) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
@@ -1497,7 +1487,7 @@ RUNTIME_FUNCTION(RuntimeReference_ValueOf) {
}
-RUNTIME_FUNCTION(RuntimeReference_SetValueOf) {
+RUNTIME_FUNCTION(Runtime_SetValueOf) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_CHECKED(Object, obj, 0);
@@ -1508,7 +1498,31 @@ RUNTIME_FUNCTION(RuntimeReference_SetValueOf) {
}
-RUNTIME_FUNCTION(RuntimeReference_ObjectEquals) {
+RUNTIME_FUNCTION(Runtime_JSValueGetValue) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSValue, obj, 0);
+ return JSValue::cast(obj)->value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_HeapObjectGetMap) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+ return obj->map();
+}
+
+
+RUNTIME_FUNCTION(Runtime_MapGetInstanceType) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_CHECKED(Map, map, 0);
+ return Smi::FromInt(map->instance_type());
+}
+
+
+RUNTIME_FUNCTION(Runtime_ObjectEquals) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_CHECKED(Object, obj1, 0);
@@ -1517,7 +1531,7 @@ RUNTIME_FUNCTION(RuntimeReference_ObjectEquals) {
}
-RUNTIME_FUNCTION(RuntimeReference_IsObject) {
+RUNTIME_FUNCTION(Runtime_IsObject) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
@@ -1532,7 +1546,7 @@ RUNTIME_FUNCTION(RuntimeReference_IsObject) {
}
-RUNTIME_FUNCTION(RuntimeReference_IsUndetectableObject) {
+RUNTIME_FUNCTION(Runtime_IsUndetectableObject) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
@@ -1540,7 +1554,7 @@ RUNTIME_FUNCTION(RuntimeReference_IsUndetectableObject) {
}
-RUNTIME_FUNCTION(RuntimeReference_IsSpecObject) {
+RUNTIME_FUNCTION(Runtime_IsSpecObject) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
@@ -1548,7 +1562,7 @@ RUNTIME_FUNCTION(RuntimeReference_IsSpecObject) {
}
-RUNTIME_FUNCTION(RuntimeReference_ClassOf) {
+RUNTIME_FUNCTION(Runtime_ClassOf) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
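
The three intrinsics added above (JSValueGetValue, HeapObjectGetMap, MapGetInstanceType) are raw accessors that compose into a type probe; per the INLINE_OPTIMIZED_FUNCTION_LIST additions later in this patch they become reachable from natives code as %_HeapObjectGetMap and friends. Their combined effect, as a plain C++ chain (illustrative only):

    static int RawInstanceType(HeapObject* obj) {
      Map* map = obj->map();          // what Runtime_HeapObjectGetMap returns
      return map->instance_type();    // what Runtime_MapGetInstanceType wraps in a Smi
    }
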
diff --git a/deps/v8/src/runtime/runtime-proxy.cc b/deps/v8/src/runtime/runtime-proxy.cc
index baf7cdb49d..703d72b062 100644
--- a/deps/v8/src/runtime/runtime-proxy.cc
+++ b/deps/v8/src/runtime/runtime-proxy.cc
@@ -34,7 +34,7 @@ RUNTIME_FUNCTION(Runtime_CreateJSFunctionProxy) {
}
-RUNTIME_FUNCTION(RuntimeReference_IsJSProxy) {
+RUNTIME_FUNCTION(Runtime_IsJSProxy) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index 57ff1b2f51..5846881f90 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -759,7 +759,7 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
}
-RUNTIME_FUNCTION(Runtime_RegExpExecRT) {
+RUNTIME_FUNCTION(Runtime_RegExpExec) {
HandleScope scope(isolate);
DCHECK(args.length() == 4);
CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
@@ -779,7 +779,7 @@ RUNTIME_FUNCTION(Runtime_RegExpExecRT) {
}
-RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
+RUNTIME_FUNCTION(Runtime_RegExpConstructResultRT) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 3);
CONVERT_SMI_ARG_CHECKED(size, 0);
@@ -800,6 +800,12 @@ RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
}
+RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_RegExpConstructResultRT(args, isolate);
+}
+
+
static JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags,
bool* success) {
uint32_t value = JSRegExp::NONE;
@@ -866,7 +872,7 @@ RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
Handle<Object> unicode = factory->ToBoolean(flags.is_unicode());
Map* map = regexp->map();
- Object* constructor = map->constructor();
+ Object* constructor = map->GetConstructor();
if (!FLAG_harmony_regexps && !FLAG_harmony_unicode_regexps &&
constructor->IsJSFunction() &&
JSFunction::cast(constructor)->initial_map() == map) {
@@ -925,13 +931,7 @@ RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) {
CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
CONVERT_ARG_HANDLE_CHECKED(String, flags, 3);
- // Get the RegExp function from the context in the literals array.
- // This is the RegExp function from the context in which the
- // function was created. We do not use the RegExp function from the
- // current native context because this might be the RegExp function
- // from another context which we should not have access to.
- Handle<JSFunction> constructor = Handle<JSFunction>(
- JSFunction::NativeContextFromLiterals(*literals)->regexp_function());
+ Handle<JSFunction> constructor = isolate->regexp_function();
// Compute the regular expression literal.
Handle<Object> regexp;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -1110,19 +1110,16 @@ RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
}
-RUNTIME_FUNCTION(RuntimeReference_RegExpConstructResult) {
+RUNTIME_FUNCTION(Runtime_RegExpExecReThrow) {
SealHandleScope shs(isolate);
- return __RT_impl_Runtime_RegExpConstructResult(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_RegExpExec) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_RegExpExecRT(args, isolate);
+ DCHECK(args.length() == 4);
+ Object* exception = isolate->pending_exception();
+ isolate->clear_pending_exception();
+ return isolate->ReThrow(exception);
}
-RUNTIME_FUNCTION(RuntimeReference_IsRegExp) {
+RUNTIME_FUNCTION(Runtime_IsRegExp) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 1);
CONVERT_ARG_CHECKED(Object, obj, 0);
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index 7eb2e0cfc1..2a2ab16d45 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -26,7 +26,7 @@ RUNTIME_FUNCTION(Runtime_ThrowConstAssignError) {
HandleScope scope(isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
- NewTypeError("harmony_const_assign", HandleVector<Object>(NULL, 0)));
+ NewTypeError("const_assign", HandleVector<Object>(NULL, 0)));
}
@@ -46,10 +46,10 @@ static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
// Do the lookup own properties only, see ES5 erratum.
LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.has_value) return isolate->heap()->exception();
+ if (!maybe.IsJust()) return isolate->heap()->exception();
if (it.IsFound()) {
- PropertyAttributes old_attributes = maybe.value;
+ PropertyAttributes old_attributes = maybe.FromJust();
// The name was declared before; check for conflicting re-declarations.
if (is_const) return ThrowRedeclarationError(isolate, name);
@@ -178,8 +178,8 @@ RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
// Lookup the property as own on the global object.
LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- DCHECK(maybe.has_value);
- PropertyAttributes old_attributes = maybe.value;
+ DCHECK(maybe.IsJust());
+ PropertyAttributes old_attributes = maybe.FromJust();
PropertyAttributes attr =
static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
@@ -331,8 +331,8 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
LookupIterator it(holder, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.has_value) return isolate->heap()->exception();
- PropertyAttributes old_attributes = maybe.value;
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ PropertyAttributes old_attributes = maybe.FromJust();
// Ignore if we can't reconfigure the value.
if ((old_attributes & DONT_DELETE) != 0) {
@@ -596,8 +596,8 @@ static Object* FindNameClash(Handle<ScopeInfo> scope_info,
LookupIterator it(global_object, name,
LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
- if (!maybe.has_value) return isolate->heap()->exception();
- if ((maybe.value & DONT_DELETE) != 0) {
+ if (!maybe.IsJust()) return isolate->heap()->exception();
+ if ((maybe.FromJust() & DONT_DELETE) != 0) {
return ThrowRedeclarationError(isolate, name);
}
@@ -790,7 +790,8 @@ RUNTIME_FUNCTION(Runtime_DeclareModules) {
case VAR:
case LET:
case CONST:
- case CONST_LEGACY: {
+ case CONST_LEGACY:
+ case IMPORT: {
PropertyAttributes attr =
IsImmutableVariableMode(mode) ? FROZEN : SEALED;
Handle<AccessorInfo> info =
@@ -855,16 +856,14 @@ RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
static Object* ComputeReceiverForNonGlobal(Isolate* isolate, JSObject* holder) {
DCHECK(!holder->IsGlobalObject());
- Context* top = isolate->context();
- // Get the context extension function.
- JSFunction* context_extension_function =
- top->native_context()->context_extension_function();
+
// If the holder isn't a context extension object, we just return it
// as the receiver. This allows arguments objects to be used as
// receivers, but only if they are put in the context scope chain
// explicitly via a with-statement.
- Object* constructor = holder->map()->constructor();
- if (constructor != context_extension_function) return holder;
+ if (holder->map()->instance_type() != JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
+ return holder;
+ }
// Fall back to using the global object as the implicit receiver if
// the property turns out to be a local variable allocated in a
// context extension object - introduced via eval.
@@ -905,11 +904,9 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
case MUTABLE_CHECK_INITIALIZED:
case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
if (value->IsTheHole()) {
- Handle<Object> error;
- MaybeHandle<Object> maybe_error =
- isolate->factory()->NewReferenceError("not_defined",
- HandleVector(&name, 1));
- if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+ Handle<Object> error = isolate->factory()->NewReferenceError(
+ "not_defined", HandleVector(&name, 1));
+ isolate->Throw(*error);
return MakePair(isolate->heap()->exception(), NULL);
}
// FALLTHROUGH
@@ -935,13 +932,6 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
// property from it.
if (!holder.is_null()) {
Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
-#ifdef DEBUG
- if (!object->IsJSProxy()) {
- Maybe<bool> maybe = JSReceiver::HasProperty(object, name);
- DCHECK(maybe.has_value);
- DCHECK(maybe.value);
- }
-#endif
// GetProperty below can cause GC.
Handle<Object> receiver_handle(
object->IsGlobalObject()
@@ -962,10 +952,9 @@ static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
if (throw_error) {
// The property doesn't exist - throw exception.
- Handle<Object> error;
- MaybeHandle<Object> maybe_error = isolate->factory()->NewReferenceError(
+ Handle<Object> error = isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
- if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+ isolate->Throw(*error);
return MakePair(isolate->heap()->exception(), NULL);
} else {
// The property doesn't exist - return undefined.
@@ -1117,7 +1106,7 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
}
-RUNTIME_FUNCTION(RuntimeReference_ArgumentsLength) {
+RUNTIME_FUNCTION(Runtime_ArgumentsLength) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
JavaScriptFrameIterator it(isolate);
@@ -1126,7 +1115,7 @@ RUNTIME_FUNCTION(RuntimeReference_ArgumentsLength) {
}
-RUNTIME_FUNCTION(RuntimeReference_Arguments) {
+RUNTIME_FUNCTION(Runtime_Arguments) {
SealHandleScope shs(isolate);
return __RT_impl_Runtime_GetArgumentsProperty(args, isolate);
}
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index df2210c635..8bfde943dd 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -279,7 +279,7 @@ RUNTIME_FUNCTION(Runtime_StringLocaleCompare) {
}
-RUNTIME_FUNCTION(Runtime_SubString) {
+RUNTIME_FUNCTION(Runtime_SubStringRT) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -307,7 +307,13 @@ RUNTIME_FUNCTION(Runtime_SubString) {
}
-RUNTIME_FUNCTION(Runtime_StringAdd) {
+RUNTIME_FUNCTION(Runtime_SubString) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_SubStringRT(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(Runtime_StringAddRT) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
@@ -320,6 +326,12 @@ RUNTIME_FUNCTION(Runtime_StringAdd) {
}
+RUNTIME_FUNCTION(Runtime_StringAdd) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_StringAddRT(args, isolate);
+}
+
+
RUNTIME_FUNCTION(Runtime_InternalizeString) {
HandleScope handles(isolate);
RUNTIME_ASSERT(args.length() == 1);
@@ -414,7 +426,7 @@ RUNTIME_FUNCTION(Runtime_CharFromCode) {
}
-RUNTIME_FUNCTION(Runtime_StringCompare) {
+RUNTIME_FUNCTION(Runtime_StringCompareRT) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 2);
@@ -483,6 +495,12 @@ RUNTIME_FUNCTION(Runtime_StringCompare) {
}
+RUNTIME_FUNCTION(Runtime_StringCompare) {
+ SealHandleScope shs(isolate);
+ return __RT_impl_Runtime_StringCompareRT(args, isolate);
+}
+
+
RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
HandleScope scope(isolate);
DCHECK(args.length() == 3);
@@ -1203,6 +1221,28 @@ RUNTIME_FUNCTION(Runtime_NewString) {
}
+RUNTIME_FUNCTION(Runtime_NewConsString) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 4);
+ CONVERT_INT32_ARG_CHECKED(length, 0);
+ CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, left, 2);
+ CONVERT_ARG_HANDLE_CHECKED(String, right, 3);
+
+ Handle<String> result;
+ if (is_one_byte) {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewOneByteConsString(length, left, right));
+ } else {
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewTwoByteConsString(length, left, right));
+ }
+ return *result;
+}
+
+
RUNTIME_FUNCTION(Runtime_StringEquals) {
HandleScope handle_scope(isolate);
DCHECK(args.length() == 2);
@@ -1229,13 +1269,13 @@ RUNTIME_FUNCTION(Runtime_FlattenString) {
}
-RUNTIME_FUNCTION(RuntimeReference_StringCharFromCode) {
+RUNTIME_FUNCTION(Runtime_StringCharFromCode) {
SealHandleScope shs(isolate);
return __RT_impl_Runtime_CharFromCode(args, isolate);
}
-RUNTIME_FUNCTION(RuntimeReference_StringCharAt) {
+RUNTIME_FUNCTION(Runtime_StringCharAt) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
if (!args[0]->IsString()) return Smi::FromInt(0);
@@ -1247,7 +1287,16 @@ RUNTIME_FUNCTION(RuntimeReference_StringCharAt) {
}
-RUNTIME_FUNCTION(RuntimeReference_OneByteSeqStringSetChar) {
+RUNTIME_FUNCTION(Runtime_OneByteSeqStringGetChar) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(SeqOneByteString, string, 0);
+ CONVERT_INT32_ARG_CHECKED(index, 1);
+ return Smi::FromInt(string->SeqOneByteStringGet(index));
+}
+
+
+RUNTIME_FUNCTION(Runtime_OneByteSeqStringSetChar) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 3);
CONVERT_INT32_ARG_CHECKED(index, 0);
@@ -1258,7 +1307,16 @@ RUNTIME_FUNCTION(RuntimeReference_OneByteSeqStringSetChar) {
}
-RUNTIME_FUNCTION(RuntimeReference_TwoByteSeqStringSetChar) {
+RUNTIME_FUNCTION(Runtime_TwoByteSeqStringGetChar) {
+ SealHandleScope shs(isolate);
+ DCHECK(args.length() == 2);
+ CONVERT_ARG_CHECKED(SeqTwoByteString, string, 0);
+ CONVERT_INT32_ARG_CHECKED(index, 1);
+ return Smi::FromInt(string->SeqTwoByteStringGet(index));
+}
+
+
+RUNTIME_FUNCTION(Runtime_TwoByteSeqStringSetChar) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 3);
CONVERT_INT32_ARG_CHECKED(index, 0);
@@ -1269,13 +1327,7 @@ RUNTIME_FUNCTION(RuntimeReference_TwoByteSeqStringSetChar) {
}
-RUNTIME_FUNCTION(RuntimeReference_StringCompare) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_StringCompare(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_StringCharCodeAt) {
+RUNTIME_FUNCTION(Runtime_StringCharCodeAt) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 2);
if (!args[0]->IsString()) return isolate->heap()->undefined_value();
@@ -1285,21 +1337,17 @@ RUNTIME_FUNCTION(RuntimeReference_StringCharCodeAt) {
}
-RUNTIME_FUNCTION(RuntimeReference_SubString) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_SubString(args, isolate);
-}
-
-
-RUNTIME_FUNCTION(RuntimeReference_StringAdd) {
- SealHandleScope shs(isolate);
- return __RT_impl_Runtime_StringAdd(args, isolate);
+RUNTIME_FUNCTION(Runtime_IsStringWrapperSafeForDefaultValueOf) {
+ UNIMPLEMENTED();
+ return NULL;
}
-RUNTIME_FUNCTION(RuntimeReference_IsStringWrapperSafeForDefaultValueOf) {
- UNIMPLEMENTED();
- return NULL;
+RUNTIME_FUNCTION(Runtime_StringGetLength) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
+ return Smi::FromInt(s->length());
}
}
} // namespace v8::internal
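
Runtime_NewConsString above selects between one- and two-byte cons representations up front. A cons string is a lazy concatenation node, which is why building one is O(1); a toy model of the idea (not V8's actual object layout):

    // Toy rope node: the halves are referenced, not copied, until flattening.
    struct ToyConsString {
      const ToyConsString* left;
      const ToyConsString* right;
      int length;  // cached sum of the two halves' lengths
    };
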
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 89e1f2a696..6a812da822 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -7,8 +7,8 @@
#include "src/arguments.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
-#include "src/natives.h"
#include "src/runtime/runtime-utils.h"
+#include "src/snapshot/natives.h"
namespace v8 {
namespace internal {
@@ -30,6 +30,36 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
}
+RUNTIME_FUNCTION(Runtime_DeoptimizeNow) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 0);
+
+ Handle<JSFunction> function;
+
+  // Deoptimize the topmost JavaScript function on the stack; this
+  // intrinsic takes no arguments.
+ JavaScriptFrameIterator it(isolate);
+ while (!it.done()) {
+ if (it.frame()->is_java_script()) {
+ function = Handle<JSFunction>(it.frame()->function());
+ break;
+ }
+ }
+ if (function.is_null()) return isolate->heap()->undefined_value();
+
+ if (!function->IsOptimized()) return isolate->heap()->undefined_value();
+
+ // TODO(turbofan): Deoptimization is not supported yet.
+ if (function->code()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+ return isolate->heap()->undefined_value();
+ }
+
+ Deoptimizer::DeoptimizeFunction(*function);
+
+ return isolate->heap()->undefined_value();
+}
+
+
RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
SealHandleScope shs(isolate);
DCHECK(args.length() == 0);
@@ -59,8 +89,6 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
(function->code()->kind() == Code::FUNCTION &&
function->code()->optimizable()));
- if (!isolate->use_crankshaft()) return isolate->heap()->undefined_value();
-
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
@@ -81,10 +109,10 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
HandleScope scope(isolate);
- RUNTIME_ASSERT(args.length() == 0);
+ RUNTIME_ASSERT(args.length() == 0 || args.length() == 1);
Handle<JSFunction> function = Handle<JSFunction>::null();
- {
+ if (args.length() == 0) {
// Find the JavaScript function on the top of the stack.
JavaScriptFrameIterator it(isolate);
while (!it.done()) {
@@ -94,6 +122,10 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
}
}
if (function.is_null()) return isolate->heap()->undefined_value();
+ } else {
+ // Function was passed as an argument.
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, arg, 0);
+ function = arg;
}
// The following assertion was lifted from the DCHECK inside
@@ -102,8 +134,6 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
(function->code()->kind() == Code::FUNCTION &&
function->code()->optimizable()));
- if (!isolate->use_crankshaft()) return isolate->heap()->undefined_value();
-
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
@@ -346,6 +376,23 @@ RUNTIME_FUNCTION(Runtime_GetV8Version) {
}
+RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
+ HandleScope scope(isolate);
+#ifdef DEBUG
+ DCHECK(args.length() == 1);
+ // Get the function and make sure it is compiled.
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
+ if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
+ return isolate->heap()->exception();
+ }
+ OFStream os(stdout);
+ func->code()->Print(os);
+ os << std::endl;
+#endif // DEBUG
+ return isolate->heap()->undefined_value();
+}
+
+
static int StackSize(Isolate* isolate) {
int n = 0;
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) n++;
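
Runtime_OptimizeOsr gains an optional explicit target, paired with an arity of -1 ("variadic") in the runtime.h list further down. The idiom, condensed from the hunk above:

    RUNTIME_ASSERT(args.length() == 0 || args.length() == 1);
    Handle<JSFunction> function;
    if (args.length() == 1) {
      CONVERT_ARG_HANDLE_CHECKED(JSFunction, arg, 0);  // caller names the function
      function = arg;
    }
    // with zero args, fall back to the topmost JavaScript frame on the stack
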
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 82224bc9b3..59c417f5b7 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -88,6 +88,8 @@ bool Runtime::SetupArrayBufferAllocatingData(Isolate* isolate,
void Runtime::NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer) {
Isolate* isolate = array_buffer->GetIsolate();
+ // Firstly, iterate over the views which are referenced directly by the array
+ // buffer.
for (Handle<Object> view_obj(array_buffer->weak_first_view(), isolate);
!view_obj->IsUndefined();) {
Handle<JSArrayBufferView> view(JSArrayBufferView::cast(*view_obj));
@@ -100,6 +102,24 @@ void Runtime::NeuterArrayBuffer(Handle<JSArrayBuffer> array_buffer) {
}
view_obj = handle(view->weak_next(), isolate);
}
+
+ // Secondly, iterate over the global list of new space views to find views
+ // that belong to the neutered array buffer.
+ Heap* heap = isolate->heap();
+ for (Handle<Object> view_obj(heap->new_array_buffer_views_list(), isolate);
+ !view_obj->IsUndefined();) {
+ Handle<JSArrayBufferView> view(JSArrayBufferView::cast(*view_obj));
+ if (view->buffer() == *array_buffer) {
+ if (view->IsJSTypedArray()) {
+ JSTypedArray::cast(*view)->Neuter();
+ } else if (view->IsJSDataView()) {
+ JSDataView::cast(*view)->Neuter();
+ } else {
+ UNREACHABLE();
+ }
+ }
+ view_obj = handle(view->weak_next(), isolate);
+ }
array_buffer->Neuter();
}
@@ -265,11 +285,18 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
holder->set_byte_offset(*byte_offset_object);
holder->set_byte_length(*byte_length_object);
+ Heap* heap = isolate->heap();
if (!maybe_buffer->IsNull()) {
Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(maybe_buffer);
holder->set_buffer(*buffer);
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
+
+ if (heap->InNewSpace(*holder)) {
+ holder->set_weak_next(heap->new_array_buffer_views_list());
+ heap->set_new_array_buffer_views_list(*holder);
+ } else {
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+ }
Handle<ExternalArray> elements = isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
@@ -367,8 +394,15 @@ RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
isolate->factory()->NewNumberFromSize(byte_length));
holder->set_byte_length(*byte_length_obj);
holder->set_length(*length_obj);
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
+
+ Heap* heap = isolate->heap();
+ if (heap->InNewSpace(*holder)) {
+ holder->set_weak_next(heap->new_array_buffer_views_list());
+ heap->set_new_array_buffer_views_list(*holder);
+ } else {
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+ }
Handle<ExternalArray> elements = isolate->factory()->NewExternalArray(
static_cast<int>(length), array_type,
@@ -542,8 +576,14 @@ RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
holder->set_byte_offset(*byte_offset);
holder->set_byte_length(*byte_length);
- holder->set_weak_next(buffer->weak_first_view());
- buffer->set_weak_first_view(*holder);
+ Heap* heap = isolate->heap();
+ if (heap->InNewSpace(*holder)) {
+ holder->set_weak_next(heap->new_array_buffer_views_list());
+ heap->set_new_array_buffer_views_list(*holder);
+ } else {
+ holder->set_weak_next(buffer->weak_first_view());
+ buffer->set_weak_first_view(*holder);
+ }
return isolate->heap()->undefined_value();
}
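
The same linking step now appears in three places (TypedArrayInitialize, TypedArrayInitializeFromArrayLike, DataViewInitialize), and NeuterArrayBuffer gains a second loop to match: views allocated in new space go onto one global heap list, presumably so the scavenger can visit them without touching their buffers, while old-space views stay threaded through the buffer's own weak list. The shared shape:

    Heap* heap = isolate->heap();
    if (heap->InNewSpace(*holder)) {             // young view: global list
      holder->set_weak_next(heap->new_array_buffer_views_list());
      heap->set_new_array_buffer_views_list(*holder);
    } else {                                     // old view: per-buffer list
      holder->set_weak_next(buffer->weak_first_view());
      buffer->set_weak_first_view(*holder);
    }
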
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index d01c141424..674f1173a7 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -14,25 +14,13 @@ namespace internal {
#define F(name, number_of_args, result_size) \
Object* Runtime_##name(int args_length, Object** args_object, \
Isolate* isolate);
+FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
+#undef F
#define P(name, number_of_args, result_size) \
ObjectPair Runtime_##name(int args_length, Object** args_object, \
Isolate* isolate);
-
-// Reference implementation for inlined runtime functions. Only used when the
-// compiler does not support a certain intrinsic. Don't optimize these, but
-// implement the intrinsic in the respective compiler instead.
-#define I(name, number_of_args, result_size) \
- Object* RuntimeReference_##name(int args_length, Object** args_object, \
- Isolate* isolate);
-
-RUNTIME_FUNCTION_LIST_RETURN_OBJECT(F)
-RUNTIME_FUNCTION_LIST_RETURN_PAIR(P)
-INLINE_OPTIMIZED_FUNCTION_LIST(F)
-INLINE_FUNCTION_LIST(I)
-
-#undef I
-#undef F
+FOR_EACH_INTRINSIC_RETURN_PAIR(P)
#undef P
@@ -44,27 +32,17 @@ INLINE_FUNCTION_LIST(I)
,
-#define I(name, number_of_args, result_size) \
- { \
- Runtime::kInline##name, Runtime::INLINE, "_" #name, \
- FUNCTION_ADDR(RuntimeReference_##name), number_of_args, result_size \
- } \
- ,
-
-
-#define IO(name, number_of_args, result_size) \
- { \
- Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, "_" #name, \
- FUNCTION_ADDR(Runtime_##name), number_of_args, result_size \
- } \
+#define I(name, number_of_args, result_size) \
+ { \
+ Runtime::kInline##name, Runtime::INLINE, "_" #name, \
+ FUNCTION_ADDR(Runtime_##name), number_of_args, result_size \
+ } \
,
static const Runtime::Function kIntrinsicFunctions[] = {
- RUNTIME_FUNCTION_LIST(F) INLINE_OPTIMIZED_FUNCTION_LIST(F)
- INLINE_FUNCTION_LIST(I) INLINE_OPTIMIZED_FUNCTION_LIST(IO)};
+ FOR_EACH_INTRINSIC(F) FOR_EACH_INTRINSIC(I)};
-#undef IO
#undef I
#undef F
@@ -78,7 +56,7 @@ void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate,
if (name == NULL) continue;
Handle<NameDictionary> new_dict = NameDictionary::Add(
dict, isolate->factory()->InternalizeUtf8String(name),
- Handle<Smi>(Smi::FromInt(i), isolate), PropertyDetails(NONE, DATA, 0));
+ Handle<Smi>(Smi::FromInt(i), isolate), PropertyDetails::Empty());
// The dictionary does not need to grow.
CHECK(new_dict.is_identical_to(dict));
}
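
The runtime.cc rewrite replaces four overlapping lists with the single FOR_EACH_INTRINSIC X-macro. A self-contained toy of the technique (hypothetical names, not the real list), showing how one list drives declarations and an enum alike, just as the real macro drives kIntrinsicFunctions and Runtime::FunctionId:

    #define FOR_EACH_TOY_INTRINSIC(F) \
      F(Add, 2, 1)                    \
      F(Negate, 1, 1)

    // Expansion 1: forward declarations.
    #define DECLARE(name, nargs, ressize) void Toy_##name();
    FOR_EACH_TOY_INTRINSIC(DECLARE)  // declares Toy_Add() and Toy_Negate()
    #undef DECLARE

    // Expansion 2: an enum of ids.
    enum ToyId {
    #define ID(name, nargs, ressize) kToy##name,
      FOR_EACH_TOY_INTRINSIC(ID)
    #undef ID
      kNumToyFunctions
    };
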
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 12f7af47c9..e2ee5bf5a8 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -15,190 +15,192 @@ namespace internal {
// The interface to C++ runtime functions.
// ----------------------------------------------------------------------------
-// RUNTIME_FUNCTION_LIST_ALWAYS defines runtime calls available in both
-// release and debug mode.
-// This macro should only be used by the macro RUNTIME_FUNCTION_LIST.
-
// WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
// MSVC Intellisense to crash. It was broken into two macros to work around
// this problem. Please avoid large recursive macros whenever possible.
-#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
- /* Property access */ \
- F(GetProperty, 2, 1) \
- F(KeyedGetProperty, 2, 1) \
- F(DeleteProperty, 3, 1) \
- F(HasOwnProperty, 2, 1) \
- F(HasProperty, 2, 1) \
- F(HasElement, 2, 1) \
- F(IsPropertyEnumerable, 2, 1) \
- F(GetPropertyNames, 1, 1) \
- F(GetPropertyNamesFast, 1, 1) \
- F(GetOwnPropertyNames, 2, 1) \
- F(GetOwnElementNames, 1, 1) \
- F(GetInterceptorInfo, 1, 1) \
- F(GetNamedInterceptorPropertyNames, 1, 1) \
- F(GetIndexedInterceptorElementNames, 1, 1) \
- F(GetArgumentsProperty, 1, 1) \
- F(ToFastProperties, 1, 1) \
- F(FinishArrayPrototypeSetup, 1, 1) \
- F(SpecialArrayFunctions, 0, 1) \
- F(IsSloppyModeFunction, 1, 1) \
- F(GetDefaultReceiver, 1, 1) \
- \
- F(SetPrototype, 2, 1) \
- F(InternalSetPrototype, 2, 1) \
- F(IsInPrototypeChain, 2, 1) \
- \
- F(GetOwnProperty, 2, 1) \
- \
- F(IsExtensible, 1, 1) \
- F(PreventExtensions, 1, 1) \
- \
- /* Utilities */ \
- F(CheckIsBootstrapping, 0, 1) \
- F(GetRootNaN, 0, 1) \
- F(Call, -1 /* >= 2 */, 1) \
- F(Apply, 5, 1) \
- F(GetFunctionDelegate, 1, 1) \
- F(GetConstructorDelegate, 1, 1) \
- F(DeoptimizeFunction, 1, 1) \
- F(ClearFunctionTypeFeedback, 1, 1) \
- F(RunningInSimulator, 0, 1) \
- F(IsConcurrentRecompilationSupported, 0, 1) \
- F(OptimizeFunctionOnNextCall, -1, 1) \
- F(OptimizeOsr, 0, 1) \
- F(NeverOptimizeFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
- F(GetOptimizationCount, 1, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
- F(CompileForOnStackReplacement, 1, 1) \
- F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
- F(SetNativeFlag, 1, 1) \
- F(IsConstructor, 1, 1) \
- F(SetInlineBuiltinFlag, 1, 1) \
- F(StoreArrayLiteralElement, 5, 1) \
- F(DebugPrepareStepInIfStepping, 1, 1) \
- F(DebugPushPromise, 1, 1) \
- F(DebugPopPromise, 0, 1) \
- F(DebugPromiseEvent, 1, 1) \
- F(DebugAsyncTaskEvent, 1, 1) \
- F(PromiseRejectEvent, 3, 1) \
- F(PromiseRevokeReject, 1, 1) \
- F(PromiseHasHandlerSymbol, 0, 1) \
- F(FlattenString, 1, 1) \
- F(LoadMutableDouble, 2, 1) \
- F(TryMigrateInstance, 1, 1) \
- F(NotifyContextDisposed, 0, 1) \
- \
- /* Array join support */ \
- F(PushIfAbsent, 2, 1) \
- F(ArrayConcat, 1, 1) \
- \
- /* Conversions */ \
- F(ToBool, 1, 1) \
- F(Typeof, 1, 1) \
- \
- F(StringToNumber, 1, 1) \
- F(StringParseInt, 2, 1) \
- F(StringParseFloat, 1, 1) \
- F(StringToLowerCase, 1, 1) \
- F(StringToUpperCase, 1, 1) \
- F(StringSplit, 3, 1) \
- F(CharFromCode, 1, 1) \
- F(URIEscape, 1, 1) \
- F(URIUnescape, 1, 1) \
- \
- F(NumberToInteger, 1, 1) \
- F(NumberToIntegerMapMinusZero, 1, 1) \
- F(NumberToJSUint32, 1, 1) \
- F(NumberToJSInt32, 1, 1) \
- \
- /* Arithmetic operations */ \
- F(NumberAdd, 2, 1) \
- F(NumberSub, 2, 1) \
- F(NumberMul, 2, 1) \
- F(NumberDiv, 2, 1) \
- F(NumberMod, 2, 1) \
- F(NumberUnaryMinus, 1, 1) \
- F(NumberImul, 2, 1) \
- \
- F(StringBuilderConcat, 3, 1) \
- F(StringBuilderJoin, 3, 1) \
- F(SparseJoinWithSeparator, 3, 1) \
- \
- /* Bit operations */ \
- F(NumberOr, 2, 1) \
- F(NumberAnd, 2, 1) \
- F(NumberXor, 2, 1) \
- \
- F(NumberShl, 2, 1) \
- F(NumberShr, 2, 1) \
- F(NumberSar, 2, 1) \
- \
- /* Comparisons */ \
- F(NumberEquals, 2, 1) \
- F(StringEquals, 2, 1) \
- \
- F(NumberCompare, 3, 1) \
- F(SmiLexicographicCompare, 2, 1) \
- \
- /* Math */ \
- F(MathAcos, 1, 1) \
- F(MathAsin, 1, 1) \
- F(MathAtan, 1, 1) \
- F(MathFloorRT, 1, 1) \
- F(MathAtan2, 2, 1) \
- F(MathExpRT, 1, 1) \
- F(RoundNumber, 1, 1) \
- F(MathFround, 1, 1) \
- F(RemPiO2, 2, 1) \
- \
- /* Regular expressions */ \
- F(RegExpInitializeAndCompile, 3, 1) \
- F(RegExpExecMultiple, 4, 1) \
- \
- /* JSON */ \
- F(ParseJson, 1, 1) \
- F(BasicJSONStringify, 1, 1) \
- F(QuoteJSONString, 1, 1) \
- \
- /* Strings */ \
- F(StringIndexOf, 3, 1) \
- F(StringLastIndexOf, 3, 1) \
- F(StringLocaleCompare, 2, 1) \
- F(StringReplaceGlobalRegExpWithString, 4, 1) \
- F(StringReplaceOneCharWithString, 3, 1) \
- F(StringMatch, 3, 1) \
- F(StringTrim, 3, 1) \
- F(StringToArray, 2, 1) \
- F(NewStringWrapper, 1, 1) \
- F(NewString, 2, 1) \
- F(TruncateString, 2, 1) \
- \
- /* Numbers */ \
- F(NumberToRadixString, 2, 1) \
- F(NumberToFixed, 2, 1) \
- F(NumberToExponential, 2, 1) \
- F(NumberToPrecision, 2, 1) \
- F(IsValidSmi, 1, 1) \
- \
- /* Classes support */ \
- F(ToMethod, 2, 1) \
- F(HomeObjectSymbol, 0, 1) \
- F(DefineClass, 6, 1) \
- F(DefineClassMethod, 3, 1) \
- F(ClassGetSourceCode, 1, 1) \
- F(LoadFromSuper, 3, 1) \
- F(LoadKeyedFromSuper, 3, 1) \
- F(ThrowConstructorNonCallableError, 0, 1) \
- F(ThrowArrayNotSubclassableError, 0, 1) \
- F(ThrowNonMethodError, 0, 1) \
- F(ThrowUnsupportedSuperError, 0, 1) \
- F(HandleStepInForDerivedConstructors, 1, 1) \
- F(StoreToSuper_Strict, 4, 1) \
- F(StoreToSuper_Sloppy, 4, 1) \
- F(StoreKeyedToSuper_Strict, 4, 1) \
- F(StoreKeyedToSuper_Sloppy, 4, 1)
+#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
+ /* Property access */ \
+ F(GetProperty, 2, 1) \
+ F(KeyedGetProperty, 2, 1) \
+ F(DeleteProperty, 3, 1) \
+ F(HasOwnProperty, 2, 1) \
+ F(HasProperty, 2, 1) \
+ F(HasElement, 2, 1) \
+ F(IsPropertyEnumerable, 2, 1) \
+ F(GetPropertyNames, 1, 1) \
+ F(GetPropertyNamesFast, 1, 1) \
+ F(GetOwnPropertyNames, 2, 1) \
+ F(GetOwnElementNames, 1, 1) \
+ F(GetInterceptorInfo, 1, 1) \
+ F(GetNamedInterceptorPropertyNames, 1, 1) \
+ F(GetIndexedInterceptorElementNames, 1, 1) \
+ F(GetArgumentsProperty, 1, 1) \
+ F(ToFastProperties, 1, 1) \
+ F(FinishArrayPrototypeSetup, 1, 1) \
+ F(SpecialArrayFunctions, 0, 1) \
+ F(IsSloppyModeFunction, 1, 1) \
+ F(GetDefaultReceiver, 1, 1) \
+ \
+ F(SetPrototype, 2, 1) \
+ F(InternalSetPrototype, 2, 1) \
+ F(IsInPrototypeChain, 2, 1) \
+ \
+ F(GetOwnProperty, 2, 1) \
+ \
+ F(IsExtensible, 1, 1) \
+ F(PreventExtensions, 1, 1) \
+ \
+ /* Utilities */ \
+ F(CheckIsBootstrapping, 0, 1) \
+ F(GetRootNaN, 0, 1) \
+ F(Call, -1 /* >= 2 */, 1) \
+ F(Apply, 5, 1) \
+ F(GetFunctionDelegate, 1, 1) \
+ F(GetConstructorDelegate, 1, 1) \
+ F(DeoptimizeFunction, 1, 1) \
+ F(DeoptimizeNow, 0, 1) \
+ F(ClearFunctionTypeFeedback, 1, 1) \
+ F(RunningInSimulator, 0, 1) \
+ F(IsConcurrentRecompilationSupported, 0, 1) \
+ F(OptimizeFunctionOnNextCall, -1, 1) \
+ F(OptimizeOsr, -1, 1) \
+ F(NeverOptimizeFunction, 1, 1) \
+ F(GetOptimizationStatus, -1, 1) \
+ F(GetOptimizationCount, 1, 1) \
+ F(UnblockConcurrentRecompilation, 0, 1) \
+ F(CompileForOnStackReplacement, 1, 1) \
+ F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
+ F(SetNativeFlag, 1, 1) \
+ F(IsConstructor, 1, 1) \
+ F(SetInlineBuiltinFlag, 1, 1) \
+ F(StoreArrayLiteralElement, 5, 1) \
+ F(DebugPrepareStepInIfStepping, 1, 1) \
+ F(DebugPushPromise, 2, 1) \
+ F(DebugPopPromise, 0, 1) \
+ F(DebugPromiseEvent, 1, 1) \
+ F(DebugAsyncTaskEvent, 1, 1) \
+ F(PromiseRejectEvent, 3, 1) \
+ F(PromiseRevokeReject, 1, 1) \
+ F(PromiseHasHandlerSymbol, 0, 1) \
+ F(FlattenString, 1, 1) \
+ F(LoadMutableDouble, 2, 1) \
+ F(TryMigrateInstance, 1, 1) \
+ F(NotifyContextDisposed, 0, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(IncrementStatsCounter, 1, 1) \
+ \
+ /* Array join support */ \
+ F(PushIfAbsent, 2, 1) \
+ F(ArrayConcat, 1, 1) \
+ \
+ /* Conversions */ \
+ F(ToBool, 1, 1) \
+ F(Typeof, 1, 1) \
+ \
+ F(StringToNumber, 1, 1) \
+ F(StringParseInt, 2, 1) \
+ F(StringParseFloat, 1, 1) \
+ F(StringToLowerCase, 1, 1) \
+ F(StringToUpperCase, 1, 1) \
+ F(StringSplit, 3, 1) \
+ F(CharFromCode, 1, 1) \
+ F(URIEscape, 1, 1) \
+ F(URIUnescape, 1, 1) \
+ \
+ F(NumberToInteger, 1, 1) \
+ F(NumberToIntegerMapMinusZero, 1, 1) \
+ F(NumberToJSUint32, 1, 1) \
+ F(NumberToJSInt32, 1, 1) \
+ \
+ /* Arithmetic operations */ \
+ F(NumberAdd, 2, 1) \
+ F(NumberSub, 2, 1) \
+ F(NumberMul, 2, 1) \
+ F(NumberDiv, 2, 1) \
+ F(NumberMod, 2, 1) \
+ F(NumberUnaryMinus, 1, 1) \
+ F(NumberImul, 2, 1) \
+ \
+ F(StringBuilderConcat, 3, 1) \
+ F(StringBuilderJoin, 3, 1) \
+ F(SparseJoinWithSeparator, 3, 1) \
+ \
+ /* Bit operations */ \
+ F(NumberOr, 2, 1) \
+ F(NumberAnd, 2, 1) \
+ F(NumberXor, 2, 1) \
+ \
+ F(NumberShl, 2, 1) \
+ F(NumberShr, 2, 1) \
+ F(NumberSar, 2, 1) \
+ \
+ /* Comparisons */ \
+ F(NumberEquals, 2, 1) \
+ F(StringEquals, 2, 1) \
+ \
+ F(NumberCompare, 3, 1) \
+ F(SmiLexicographicCompare, 2, 1) \
+ \
+ /* Math */ \
+ F(MathAcos, 1, 1) \
+ F(MathAsin, 1, 1) \
+ F(MathAtan, 1, 1) \
+ F(MathAtan2, 2, 1) \
+ F(MathExpRT, 1, 1) \
+ F(RoundNumber, 1, 1) \
+ F(MathFround, 1, 1) \
+ F(RemPiO2, 2, 1) \
+ \
+ /* Regular expressions */ \
+ F(RegExpInitializeAndCompile, 3, 1) \
+ F(RegExpExecMultiple, 4, 1) \
+ F(RegExpExecReThrow, 4, 1) \
+ \
+ /* JSON */ \
+ F(ParseJson, 1, 1) \
+ F(BasicJSONStringify, 1, 1) \
+ F(QuoteJSONString, 1, 1) \
+ \
+ /* Strings */ \
+ F(StringIndexOf, 3, 1) \
+ F(StringLastIndexOf, 3, 1) \
+ F(StringLocaleCompare, 2, 1) \
+ F(StringReplaceGlobalRegExpWithString, 4, 1) \
+ F(StringReplaceOneCharWithString, 3, 1) \
+ F(StringMatch, 3, 1) \
+ F(StringTrim, 3, 1) \
+ F(StringToArray, 2, 1) \
+ F(NewStringWrapper, 1, 1) \
+ F(NewString, 2, 1) \
+ F(NewConsString, 4, 1) \
+ F(TruncateString, 2, 1) \
+ \
+ /* Numbers */ \
+ F(NumberToRadixString, 2, 1) \
+ F(NumberToFixed, 2, 1) \
+ F(NumberToExponential, 2, 1) \
+ F(NumberToPrecision, 2, 1) \
+ F(IsValidSmi, 1, 1) \
+ \
+ /* Classes support */ \
+ F(ClassGetSourceCode, 1, 1) \
+ F(DefineClass, 6, 1) \
+ F(DefineClassMethod, 3, 1) \
+ F(HandleStepInForDerivedConstructors, 1, 1) \
+ F(HomeObjectSymbol, 0, 1) \
+ F(LoadFromSuper, 3, 1) \
+ F(LoadKeyedFromSuper, 3, 1) \
+ F(StoreKeyedToSuper_Sloppy, 4, 1) \
+ F(StoreKeyedToSuper_Strict, 4, 1) \
+ F(StoreToSuper_Sloppy, 4, 1) \
+ F(StoreToSuper_Strict, 4, 1) \
+ F(ThrowArrayNotSubclassableError, 0, 1) \
+ F(ThrowConstructorNonCallableError, 0, 1) \
+ F(ThrowIfStaticPrototype, 1, 1) \
+ F(ThrowNonMethodError, 0, 1) \
+ F(ThrowStaticPrototypeError, 0, 1) \
+ F(ThrowUnsupportedSuperError, 0, 1) \
+ F(ToMethod, 2, 1)
#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
@@ -420,6 +422,7 @@ namespace internal {
F(HasFastProperties, 1, 1) \
F(TransitionElementsKind, 2, 1) \
F(HaveSameMap, 2, 1) \
+ F(DisassembleFunction, 1, 1) \
F(IsJSGlobalProxy, 1, 1) \
F(ForInCacheArrayLength, 2, 1) /* TODO(turbofan): Only temporary */
@@ -427,14 +430,13 @@ namespace internal {
#define RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \
/* String and Regexp */ \
F(NumberToStringRT, 1, 1) \
- F(RegExpConstructResult, 3, 1) \
- F(RegExpExecRT, 4, 1) \
- F(StringAdd, 2, 1) \
- F(SubString, 3, 1) \
+ F(RegExpConstructResultRT, 3, 1) \
+ F(StringAddRT, 2, 1) \
+ F(SubStringRT, 3, 1) \
F(InternalizeString, 1, 1) \
- F(StringCompare, 2, 1) \
+ F(StringCompareRT, 2, 1) \
F(StringCharCodeAtRT, 2, 1) \
- F(GetFromCache, 2, 1) \
+ F(GetFromCacheRT, 2, 1) \
\
/* Compilation */ \
F(CompileLazy, 1, 1) \
@@ -458,7 +460,7 @@ namespace internal {
\
/* Harmony generators */ \
F(CreateJSGeneratorObject, 0, 1) \
- F(SuspendJSGeneratorObject, 1, 1) \
+ F(SuspendJSGeneratorObject, -1, 1) \
F(ResumeJSGeneratorObject, 3, 1) \
F(GeneratorClose, 1, 1) \
\
@@ -486,6 +488,7 @@ namespace internal {
F(ThrowConstAssignError, 0, 1) \
F(StackGuard, 0, 1) \
F(Interrupt, 0, 1) \
+ F(FindExceptionHandler, 0, 1) \
F(PromoteScheduledException, 0, 1) \
\
/* Contexts */ \
@@ -510,7 +513,7 @@ namespace internal {
F(MathPowRT, 2, 1)
-#define RUNTIME_FUNCTION_LIST_RETURN_PAIR(F) \
+#define FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
F(LoadLookupSlot, 2, 2) \
F(LoadLookupSlotNoReferenceError, 2, 2) \
F(ResolvePossiblyDirectEval, 6, 2) \
@@ -522,7 +525,7 @@ namespace internal {
/* Debugger support*/ \
F(DebugBreak, 0, 1) \
F(SetDebugEventListener, 2, 1) \
- F(Break, 0, 1) \
+ F(ScheduleBreak, 0, 1) \
F(DebugGetPropertyDetails, 2, 1) \
F(DebugGetProperty, 2, 1) \
F(DebugPropertyTypeFromDetails, 1, 1) \
@@ -561,9 +564,8 @@ namespace internal {
F(DebugSetScriptSource, 2, 1) \
F(DebugCallbackSupportsStepping, 1, 1) \
F(SystemBreak, 0, 1) \
- F(DebugDisassembleFunction, 1, 1) \
- F(DebugDisassembleConstructor, 1, 1) \
F(FunctionGetInferredName, 1, 1) \
+ F(FunctionGetDebugName, 1, 1) \
F(LiveEditFindSharedFunctionInfosForScript, 1, 1) \
F(LiveEditGatherCompileInfo, 2, 1) \
F(LiveEditReplaceScript, 3, 1) \
@@ -576,7 +578,7 @@ namespace internal {
F(LiveEditCompareStrings, 2, 1) \
F(LiveEditRestartFrame, 2, 1) \
F(GetFunctionCodePositionFromSource, 2, 1) \
- F(ExecuteInDebugContext, 2, 1) \
+ F(ExecuteInDebugContext, 1, 1) \
F(GetDebugContext, 0, 1) \
F(SetFlags, 1, 1) \
F(CollectGarbage, 1, 1) \
@@ -627,26 +629,8 @@ namespace internal {
// ----------------------------------------------------------------------------
-// RUNTIME_FUNCTION_LIST defines all runtime functions accessed
-// either directly by id (via the code generator), or indirectly
-// via a native call by name (from within JS code).
-// Entries have the form F(name, number of arguments, number of return values).
-
-#define RUNTIME_FUNCTION_LIST_RETURN_OBJECT(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
- RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \
- RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
- RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
-
-
-#define RUNTIME_FUNCTION_LIST(F) \
- RUNTIME_FUNCTION_LIST_RETURN_OBJECT(F) \
- RUNTIME_FUNCTION_LIST_RETURN_PAIR(F)
-
-// ----------------------------------------------------------------------------
-// INLINE_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code.
+// INLINE_FUNCTION_LIST defines the intrinsics typically handled specially by
+// the various compilers.
// Entries have the form F(name, number of arguments, number of return values).
#define INLINE_FUNCTION_LIST(F) \
F(IsSmi, 1, 1) \
@@ -664,7 +648,9 @@ namespace internal {
F(DateField, 2 /* date object, field index */, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
+ F(OneByteSeqStringGetChar, 2, 1) \
F(OneByteSeqStringSetChar, 3, 1) \
+ F(TwoByteSeqStringGetChar, 2, 1) \
F(TwoByteSeqStringSetChar, 3, 1) \
F(ObjectEquals, 2, 1) \
F(IsObject, 1, 1) \
@@ -693,11 +679,8 @@ namespace internal {
// ----------------------------------------------------------------------------
-// INLINE_OPTIMIZED_FUNCTION_LIST defines all inlined functions accessed
-// with a native call of the form %_name from within JS code that also have
-// a corresponding runtime function, that is called from non-optimized code.
-// For the benefit of (fuzz) tests, the runtime version can also be called
-// directly as %name (i.e. without the leading underscore).
+// INLINE_OPTIMIZED_FUNCTION_LIST defines the intrinsics typically handled
+// specially by Crankshaft.
// Entries have the form F(name, number of arguments, number of return values).
#define INLINE_OPTIMIZED_FUNCTION_LIST(F) \
/* Typed Arrays */ \
@@ -714,7 +697,9 @@ namespace internal {
F(ConstructDouble, 2, 1) \
F(DoubleHi, 1, 1) \
F(DoubleLo, 1, 1) \
- F(MathSqrtRT, 1, 1) \
+ F(MathClz32, 1, 1) \
+ F(MathFloor, 1, 1) \
+ F(MathSqrt, 1, 1) \
F(MathLogRT, 1, 1) \
/* ES6 Collections */ \
F(MapClear, 1, 1) \
@@ -732,7 +717,33 @@ namespace internal {
F(SetInitialize, 1, 1) \
/* Arrays */ \
F(HasFastPackedElements, 1, 1) \
- F(GetPrototype, 1, 1)
+ F(GetPrototype, 1, 1) \
+ /* Strings */ \
+ F(StringGetLength, 1, 1) \
+ /* JSValue */ \
+ F(JSValueGetValue, 1, 1) \
+ /* HeapObject */ \
+ F(HeapObjectGetMap, 1, 1) \
+ /* Map */ \
+ F(MapGetInstanceType, 1, 1)
+
+
+#define FOR_EACH_INTRINSIC_RETURN_OBJECT(F) \
+ RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
+ RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
+ RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \
+ RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
+ RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F) \
+ INLINE_FUNCTION_LIST(F) \
+ INLINE_OPTIMIZED_FUNCTION_LIST(F)
+
+
+// FOR_EACH_INTRINSIC defines the list of all intrinsics, coming in 2 flavors,
+// either returning an object or a pair.
+// Entries have the form F(name, number of arguments, number of values).
+#define FOR_EACH_INTRINSIC(F) \
+ FOR_EACH_INTRINSIC_RETURN_PAIR(F) \
+ FOR_EACH_INTRINSIC_RETURN_OBJECT(F)
//---------------------------------------------------------------------------
@@ -766,19 +777,14 @@ class Runtime : public AllStatic {
public:
enum FunctionId {
#define F(name, nargs, ressize) k##name,
- RUNTIME_FUNCTION_LIST(F) INLINE_OPTIMIZED_FUNCTION_LIST(F)
-#undef F
-#define F(name, nargs, ressize) kInline##name,
- INLINE_FUNCTION_LIST(F)
-#undef F
-#define F(name, nargs, ressize) kInlineOptimized##name,
- INLINE_OPTIMIZED_FUNCTION_LIST(F)
+#define I(name, nargs, ressize) kInline##name,
+ FOR_EACH_INTRINSIC(F) FOR_EACH_INTRINSIC(I)
+#undef I
#undef F
- kNumFunctions,
- kFirstInlineFunction = kInlineIsSmi
+ kNumFunctions,
};
- enum IntrinsicType { RUNTIME, INLINE, INLINE_OPTIMIZED };
+ enum IntrinsicType { RUNTIME, INLINE };
// Intrinsic function descriptor.
struct Function {
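
The F/I substitution above lets a single list macro emit both the runtime ids (kName) and the inline ids (kInlineName), replacing the three separate enum sections that existed before. A minimal sketch of the same X-macro pattern; the INTRINSICS list and its three entries are invented for illustration, not V8's actual lists:

    #include <cstdio>

    // One list macro drives every expansion site (the "X-macro" pattern).
    #define INTRINSICS(F) \
      F(IsSmi, 1, 1)      \
      F(MathSqrt, 1, 1)   \
      F(MapClear, 1, 1)

    enum FunctionId {
    #define F(name, nargs, ressize) k##name,        // runtime flavor: kIsSmi, ...
    #define I(name, nargs, ressize) kInline##name,  // inline flavor: kInlineIsSmi, ...
      INTRINSICS(F) INTRINSICS(I)
    #undef I
    #undef F
      kNumFunctions
    };

    int main() {
      // Both flavors come from the same list, so they can never drift apart.
      std::printf("%d ids, kInlineIsSmi = %d\n", static_cast<int>(kNumFunctions),
                  static_cast<int>(kInlineIsSmi));
    }

Adding an entry to the list grows both halves of the enum at once, which is exactly what FOR_EACH_INTRINSIC buys runtime.h.
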
diff --git a/deps/v8/src/scanner.cc b/deps/v8/src/scanner.cc
index de1b8e8b72..a40688f52b 100644
--- a/deps/v8/src/scanner.cc
+++ b/deps/v8/src/scanner.cc
@@ -35,11 +35,9 @@ Handle<String> LiteralBuffer::Internalize(Isolate* isolate) const {
Scanner::Scanner(UnicodeCache* unicode_cache)
: unicode_cache_(unicode_cache),
octal_pos_(Location::invalid()),
- harmony_scoping_(false),
harmony_modules_(false),
harmony_numeric_literals_(false),
harmony_classes_(false),
- harmony_templates_(false),
harmony_unicode_(false) {}
@@ -656,10 +654,8 @@ void Scanner::Scan() {
break;
case '`':
- if (HarmonyTemplates()) {
- token = ScanTemplateStart();
- break;
- }
+ token = ScanTemplateStart();
+ break;
default:
if (c0_ < 0) {
@@ -788,11 +784,31 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
}
+const int kMaxAscii = 127;
+
+
Token::Value Scanner::ScanString() {
uc32 quote = c0_;
- Advance(); // consume quote
+ Advance<false, false>(); // consume quote
LiteralScope literal(this);
+ while (true) {
+ if (c0_ > kMaxAscii) {
+ HandleLeadSurrogate();
+ break;
+ }
+ if (c0_ < 0 || c0_ == '\n' || c0_ == '\r') return Token::ILLEGAL;
+ if (c0_ == quote) {
+ literal.Complete();
+ Advance<false, false>();
+ return Token::STRING;
+ }
+ uc32 c = c0_;
+ if (c == '\\') break;
+ Advance<false, false>();
+ AddLiteralChar(c);
+ }
+
while (c0_ != quote && c0_ >= 0
&& !unicode_cache_->IsLineTerminator(c0_)) {
uc32 c = c0_;
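
The loop added above is a fast path: while the input stays ASCII with no escapes and no line terminators, each character is consumed with the cheapest Advance variant and appended directly, and only a backslash or a non-ASCII character falls through to the general Unicode-aware loop below. A standalone sketch of that shape, assuming a plain std::string input rather than V8's streamed sources (names are illustrative):

    #include <cstddef>
    #include <string>

    const int kMaxAscii = 127;

    // Returns true (and the decoded value) on the fast ASCII, escape-free path;
    // false means a general-purpose scanner has to take over from *pos.
    bool FastScanString(const std::string& s, std::size_t* pos, std::string* out) {
      const char quote = s[(*pos)++];  // consume the opening quote
      while (*pos < s.size()) {
        const unsigned char c = static_cast<unsigned char>(s[*pos]);
        if (c > kMaxAscii || c == '\\') return false;  // bail to the slow path
        if (c == '\n' || c == '\r') return false;      // illegal in a literal
        ++*pos;
        if (c == quote) return true;                   // closing quote: done
        out->push_back(static_cast<char>(c));
      }
      return false;  // unterminated literal
    }
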
@@ -913,6 +929,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
enum { DECIMAL, HEX, OCTAL, IMPLICIT_OCTAL, BINARY } kind = DECIMAL;
LiteralScope literal(this);
+ bool at_start = !seen_period;
if (seen_period) {
// we have already seen a decimal point of the float
AddLiteralChar('.');
@@ -962,6 +979,7 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
kind = IMPLICIT_OCTAL;
while (true) {
if (c0_ == '8' || c0_ == '9') {
+ at_start = false;
kind = DECIMAL;
break;
}
@@ -977,6 +995,27 @@ Token::Value Scanner::ScanNumber(bool seen_period) {
// Parse decimal digits and allow trailing fractional part.
if (kind == DECIMAL) {
+ if (at_start) {
+ uint64_t value = 0;
+ while (IsDecimalDigit(c0_)) {
+ value = 10 * value + (c0_ - '0');
+
+ uc32 first_char = c0_;
+ Advance<false, false>();
+ AddLiteralChar(first_char);
+ }
+
+ if (next_.literal_chars->one_byte_literal().length() <= 10 &&
+ value <= Smi::kMaxValue && c0_ != '.' && c0_ != 'e' && c0_ != 'E') {
+ smi_value_ = static_cast<int>(value);
+ literal.Complete();
+ HandleLeadSurrogate();
+
+ return Token::SMI;
+ }
+ HandleLeadSurrogate();
+ }
+
ScanDecimalDigits(); // optional
if (c0_ == '.') {
AddLiteralCharAdvance();
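
The new block recognizes small integer literals without ever round-tripping through double parsing: decimal digits are accumulated into a uint64_t, and if the literal is at most 10 digits, fits the Smi range, and is not followed by '.' or an exponent, the scanner can hand back Token::SMI with the value already computed. A rough sketch of that fast path (kMaxSmi and the function name are placeholders, not V8's):

    #include <cstdint>

    const uint64_t kMaxSmi = (UINT64_C(1) << 30) - 1;  // placeholder Smi bound

    // Returns true and the value if [s, end) starts with a bare small integer;
    // false means the caller must fall back to full floating-point parsing.
    bool TryScanSmi(const char* s, const char* end, int* value) {
      uint64_t v = 0;
      const char* p = s;
      while (p < end && *p >= '0' && *p <= '9') {
        v = 10 * v + static_cast<uint64_t>(*p - '0');
        ++p;
      }
      // At most 10 digits keeps the uint64_t accumulation from overflowing,
      // mirroring the length check in the diff above.
      if (p == s || p - s > 10) return false;
      if (v > kMaxSmi) return false;
      // A following '.', 'e' or 'E' means this is really a fraction/exponent.
      if (p < end && (*p == '.' || *p == 'e' || *p == 'E')) return false;
      *value = static_cast<int>(v);
      return true;
    }
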
@@ -1046,79 +1085,77 @@ uc32 Scanner::ScanUnicodeEscape() {
// ----------------------------------------------------------------------------
// Keyword Matcher
-#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
- KEYWORD_GROUP('b') \
- KEYWORD("break", Token::BREAK) \
- KEYWORD_GROUP('c') \
- KEYWORD("case", Token::CASE) \
- KEYWORD("catch", Token::CATCH) \
- KEYWORD("class", \
- harmony_classes ? Token::CLASS : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("const", Token::CONST) \
- KEYWORD("continue", Token::CONTINUE) \
- KEYWORD_GROUP('d') \
- KEYWORD("debugger", Token::DEBUGGER) \
- KEYWORD("default", Token::DEFAULT) \
- KEYWORD("delete", Token::DELETE) \
- KEYWORD("do", Token::DO) \
- KEYWORD_GROUP('e') \
- KEYWORD("else", Token::ELSE) \
- KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
- KEYWORD("export", \
- harmony_modules ? Token::EXPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("extends", \
- harmony_classes ? Token::EXTENDS : Token::FUTURE_RESERVED_WORD) \
- KEYWORD_GROUP('f') \
- KEYWORD("false", Token::FALSE_LITERAL) \
- KEYWORD("finally", Token::FINALLY) \
- KEYWORD("for", Token::FOR) \
- KEYWORD("function", Token::FUNCTION) \
- KEYWORD_GROUP('i') \
- KEYWORD("if", Token::IF) \
- KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("import", \
- harmony_modules ? Token::IMPORT : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("in", Token::IN) \
- KEYWORD("instanceof", Token::INSTANCEOF) \
- KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('l') \
- KEYWORD("let", \
- harmony_scoping ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('n') \
- KEYWORD("new", Token::NEW) \
- KEYWORD("null", Token::NULL_LITERAL) \
- KEYWORD_GROUP('p') \
- KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD_GROUP('r') \
- KEYWORD("return", Token::RETURN) \
- KEYWORD_GROUP('s') \
- KEYWORD("static", harmony_classes ? Token::STATIC \
- : Token::FUTURE_STRICT_RESERVED_WORD) \
- KEYWORD("super", \
- harmony_classes ? Token::SUPER : Token::FUTURE_RESERVED_WORD) \
- KEYWORD("switch", Token::SWITCH) \
- KEYWORD_GROUP('t') \
- KEYWORD("this", Token::THIS) \
- KEYWORD("throw", Token::THROW) \
- KEYWORD("true", Token::TRUE_LITERAL) \
- KEYWORD("try", Token::TRY) \
- KEYWORD("typeof", Token::TYPEOF) \
- KEYWORD_GROUP('v') \
- KEYWORD("var", Token::VAR) \
- KEYWORD("void", Token::VOID) \
- KEYWORD_GROUP('w') \
- KEYWORD("while", Token::WHILE) \
- KEYWORD("with", Token::WITH) \
- KEYWORD_GROUP('y') \
+#define KEYWORDS(KEYWORD_GROUP, KEYWORD) \
+ KEYWORD_GROUP('b') \
+ KEYWORD("break", Token::BREAK) \
+ KEYWORD_GROUP('c') \
+ KEYWORD("case", Token::CASE) \
+ KEYWORD("catch", Token::CATCH) \
+ KEYWORD("class", \
+ harmony_classes ? Token::CLASS : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("const", Token::CONST) \
+ KEYWORD("continue", Token::CONTINUE) \
+ KEYWORD_GROUP('d') \
+ KEYWORD("debugger", Token::DEBUGGER) \
+ KEYWORD("default", Token::DEFAULT) \
+ KEYWORD("delete", Token::DELETE) \
+ KEYWORD("do", Token::DO) \
+ KEYWORD_GROUP('e') \
+ KEYWORD("else", Token::ELSE) \
+ KEYWORD("enum", Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("export", \
+ harmony_modules ? Token::EXPORT : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("extends", \
+ harmony_classes ? Token::EXTENDS : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD_GROUP('f') \
+ KEYWORD("false", Token::FALSE_LITERAL) \
+ KEYWORD("finally", Token::FINALLY) \
+ KEYWORD("for", Token::FOR) \
+ KEYWORD("function", Token::FUNCTION) \
+ KEYWORD_GROUP('i') \
+ KEYWORD("if", Token::IF) \
+ KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("import", \
+ harmony_modules ? Token::IMPORT : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("in", Token::IN) \
+ KEYWORD("instanceof", Token::INSTANCEOF) \
+ KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('l') \
+ KEYWORD("let", Token::LET) \
+ KEYWORD_GROUP('n') \
+ KEYWORD("new", Token::NEW) \
+ KEYWORD("null", Token::NULL_LITERAL) \
+ KEYWORD_GROUP('p') \
+ KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD_GROUP('r') \
+ KEYWORD("return", Token::RETURN) \
+ KEYWORD_GROUP('s') \
+ KEYWORD("static", harmony_classes ? Token::STATIC \
+ : Token::FUTURE_STRICT_RESERVED_WORD) \
+ KEYWORD("super", \
+ harmony_classes ? Token::SUPER : Token::FUTURE_RESERVED_WORD) \
+ KEYWORD("switch", Token::SWITCH) \
+ KEYWORD_GROUP('t') \
+ KEYWORD("this", Token::THIS) \
+ KEYWORD("throw", Token::THROW) \
+ KEYWORD("true", Token::TRUE_LITERAL) \
+ KEYWORD("try", Token::TRY) \
+ KEYWORD("typeof", Token::TYPEOF) \
+ KEYWORD_GROUP('v') \
+ KEYWORD("var", Token::VAR) \
+ KEYWORD("void", Token::VOID) \
+ KEYWORD_GROUP('w') \
+ KEYWORD("while", Token::WHILE) \
+ KEYWORD("with", Token::WITH) \
+ KEYWORD_GROUP('y') \
KEYWORD("yield", Token::YIELD)
static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
int input_length,
- bool harmony_scoping,
bool harmony_modules,
bool harmony_classes) {
DCHECK(input_length >= 1);
@@ -1168,16 +1205,59 @@ bool Scanner::IdentifierIsFutureStrictReserved(
}
return Token::FUTURE_STRICT_RESERVED_WORD ==
KeywordOrIdentifierToken(string->raw_data(), string->length(),
- harmony_scoping_, harmony_modules_,
- harmony_classes_);
+ harmony_modules_, harmony_classes_);
}
Token::Value Scanner::ScanIdentifierOrKeyword() {
DCHECK(unicode_cache_->IsIdentifierStart(c0_));
LiteralScope literal(this);
- // Scan identifier start character.
- if (c0_ == '\\') {
+ if (IsInRange(c0_, 'a', 'z')) {
+ do {
+ uc32 first_char = c0_;
+ Advance<false, false>();
+ AddLiteralChar(first_char);
+ } while (IsInRange(c0_, 'a', 'z'));
+
+ if (IsDecimalDigit(c0_) || IsInRange(c0_, 'A', 'Z') || c0_ == '_' ||
+ c0_ == '$') {
+      // Identifier starting with a lowercase run: cannot be a keyword.
+ uc32 first_char = c0_;
+ Advance<false, false>();
+ AddLiteralChar(first_char);
+ while (IsAsciiIdentifier(c0_)) {
+ uc32 first_char = c0_;
+ Advance<false, false>();
+ AddLiteralChar(first_char);
+ }
+ if (c0_ <= kMaxAscii && c0_ != '\\') {
+ literal.Complete();
+ return Token::IDENTIFIER;
+ }
+ } else if (c0_ <= kMaxAscii && c0_ != '\\') {
+ // Only a-z+: could be a keyword or identifier.
+ literal.Complete();
+ Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
+ return KeywordOrIdentifierToken(chars.start(), chars.length(),
+ harmony_modules_, harmony_classes_);
+ }
+
+ HandleLeadSurrogate();
+ } else if (IsInRange(c0_, 'A', 'Z') || c0_ == '_' || c0_ == '$') {
+ do {
+ uc32 first_char = c0_;
+ Advance<false, false>();
+ AddLiteralChar(first_char);
+ } while (IsAsciiIdentifier(c0_));
+
+ if (c0_ <= kMaxAscii && c0_ != '\\') {
+ literal.Complete();
+ return Token::IDENTIFIER;
+ }
+
+ HandleLeadSurrogate();
+ } else if (c0_ == '\\') {
+ // Scan identifier start character.
uc32 c = ScanIdentifierUnicodeEscape();
// Only allow legal identifier start characters.
if (c < 0 ||
@@ -1187,12 +1267,12 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
}
AddLiteralChar(c);
return ScanIdentifierSuffix(&literal);
+ } else {
+ uc32 first_char = c0_;
+ Advance();
+ AddLiteralChar(first_char);
}
- uc32 first_char = c0_;
- Advance();
- AddLiteralChar(first_char);
-
// Scan the rest of the identifier characters.
while (c0_ >= 0 && unicode_cache_->IsIdentifierPart(c0_)) {
if (c0_ != '\\') {
@@ -1211,11 +1291,9 @@ Token::Value Scanner::ScanIdentifierOrKeyword() {
Vector<const uint8_t> chars = next_.literal_chars->one_byte_literal();
return KeywordOrIdentifierToken(chars.start(),
chars.length(),
- harmony_scoping_,
harmony_modules_,
harmony_classes_);
}
-
return Token::IDENTIFIER;
}
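
The rewritten ScanIdentifierOrKeyword splits the common cases off before the general loop: a run of pure a-z may be a keyword and is checked against the KEYWORDS table; once an uppercase letter, digit, '_' or '$' appears the token can only be an identifier; escapes and non-ASCII input still take the original slow path. A condensed sketch of that dispatch, with IsKeyword standing in for the generated matcher (all names here are illustrative):

    #include <string>

    static bool IsLower(char c) { return c >= 'a' && c <= 'z'; }
    static bool IsAsciiIdent(char c) {
      return IsLower(c) || (c >= 'A' && c <= 'Z') ||
             (c >= '0' && c <= '9') || c == '_' || c == '$';
    }
    // Stand-in for the KEYWORDS-generated matcher.
    static bool IsKeyword(const std::string& w) { return w == "while"; }

    enum Result { IDENTIFIER, KEYWORD, SLOW_PATH };

    // Precondition: *p starts an identifier.
    Result ScanIdentOrKeyword(const char* p, std::string* out) {
      while (IsLower(*p)) out->push_back(*p++);  // cheap a-z run
      if (IsAsciiIdent(*p)) {
        // Mixed in a digit, uppercase, '_' or '$': cannot be a keyword.
        while (IsAsciiIdent(*p)) out->push_back(*p++);
        if (*p != '\\' && static_cast<unsigned char>(*p) <= 127) return IDENTIFIER;
      } else if (*p != '\\' && static_cast<unsigned char>(*p) <= 127) {
        // Pure a-z run ended cleanly: consult the keyword table.
        return IsKeyword(*out) ? KEYWORD : IDENTIFIER;
      }
      return SLOW_PATH;  // escape or non-ASCII: defer to the general scanner
    }
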
@@ -1341,11 +1419,6 @@ double Scanner::DoubleValue() {
}
-int Scanner::FindNumber(DuplicateFinder* finder, int value) {
- return finder->AddNumber(literal_one_byte_string(), value);
-}
-
-
int Scanner::FindSymbol(DuplicateFinder* finder, int value) {
if (is_literal_one_byte()) {
return finder->AddOneByteSymbol(literal_one_byte_string(), value);
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index 86a0098f86..804082562e 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -427,7 +427,6 @@ class Scanner {
}
}
- int FindNumber(DuplicateFinder* finder, int value);
int FindSymbol(DuplicateFinder* finder, int value);
UnicodeCache* unicode_cache() { return unicode_cache_; }
@@ -436,18 +435,15 @@ class Scanner {
Location octal_position() const { return octal_pos_; }
void clear_octal_position() { octal_pos_ = Location::invalid(); }
+ // Returns the value of the last smi that was scanned.
+ int smi_value() const { return smi_value_; }
+
// Seek forward to the given position. This operation does not
// work in general, for instance when there are pushed back
// characters, but works for seeking forward until simple delimiter
// tokens, which is what it is used for.
void SeekForward(int pos);
- bool HarmonyScoping() const {
- return harmony_scoping_;
- }
- void SetHarmonyScoping(bool scoping) {
- harmony_scoping_ = scoping;
- }
bool HarmonyModules() const {
return harmony_modules_;
}
@@ -466,8 +462,6 @@ class Scanner {
void SetHarmonyClasses(bool classes) {
harmony_classes_ = classes;
}
- bool HarmonyTemplates() const { return harmony_templates_; }
- void SetHarmonyTemplates(bool templates) { harmony_templates_ = templates; }
bool HarmonyUnicode() const { return harmony_unicode_; }
void SetHarmonyUnicode(bool unicode) { harmony_unicode_ = unicode; }
@@ -530,8 +524,11 @@ class Scanner {
}
inline void StartRawLiteral() {
- raw_literal_buffer_.Reset();
- next_.raw_literal_chars = &raw_literal_buffer_;
+ LiteralBuffer* free_buffer =
+ (current_.raw_literal_chars == &raw_literal_buffer1_) ?
+ &raw_literal_buffer2_ : &raw_literal_buffer1_;
+ free_buffer->Reset();
+ next_.raw_literal_chars = free_buffer;
}
INLINE(void AddLiteralChar(uc32 c)) {
@@ -562,12 +559,16 @@ class Scanner {
}
// Low-level scanning support.
- template <bool capture_raw = false>
+ template <bool capture_raw = false, bool check_surrogate = true>
void Advance() {
if (capture_raw) {
AddRawLiteralChar(c0_);
}
c0_ = source_->Advance();
+ if (check_surrogate) HandleLeadSurrogate();
+ }
+
+ void HandleLeadSurrogate() {
if (unibrow::Utf16::IsLeadSurrogate(c0_)) {
uc32 c1 = source_->Advance();
if (!unibrow::Utf16::IsTrailSurrogate(c1)) {
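
Advance gaining a second template parameter is what makes the fast paths above cheap: both flags are compile-time constants, so the dead branches should fold away entirely and Advance<false, false>() reduces to a single character fetch, while the defaults keep raw capture and surrogate handling intact. A minimal illustration of the technique on an invented scanner (not V8's Scanner):

    struct Stream {
      const char* p;
      int Next() { return *p ? static_cast<unsigned char>(*p++) : -1; }
    };

    class MiniScanner {
     public:
      explicit MiniScanner(const char* src) : source_{src}, c0_(source_.Next()) {}

      // Both flags are template parameters, so dead branches fold away:
      // Advance<false, false>() is just "c0_ = source_.Next()".
      template <bool capture_raw = false, bool check_surrogate = true>
      void Advance() {
        if (capture_raw) ++raw_chars_;       // stand-in for AddRawLiteralChar
        c0_ = source_.Next();
        if (check_surrogate) HandleLeadSurrogate();
      }

      int c0() const { return c0_; }

     private:
      void HandleLeadSurrogate() { /* combine UTF-16 surrogate pairs here */ }

      Stream source_;
      int raw_chars_ = 0;
      int c0_;
    };

Call sites opt in explicitly, e.g. Advance<false, false>() inside the ASCII string loop.
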
@@ -710,7 +711,8 @@ class Scanner {
LiteralBuffer source_mapping_url_;
// Buffer to store raw string values
- LiteralBuffer raw_literal_buffer_;
+ LiteralBuffer raw_literal_buffer1_;
+ LiteralBuffer raw_literal_buffer2_;
TokenDesc current_; // desc for current token (as returned by Next())
TokenDesc next_; // desc for next token (one token look-ahead)
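
One raw-literal buffer is no longer enough because the scanner keeps a full token of lookahead: current_ and next_ can both refer to raw template-literal text at the same time, so StartRawLiteral above ping-pongs between two buffers, always handing out the one the current token does not own. A tiny sketch of that selection (illustrative types, not V8's LiteralBuffer):

    #include <string>

    struct TokenDesc {
      std::string* raw = nullptr;  // raw literal text, if any
    };

    class Lookahead {
     public:
      // Hand out whichever buffer the current token is not using, so filling
      // the next token's raw literal never clobbers text still readable
      // through current_.
      std::string* StartRawLiteral() {
        std::string* free_buffer = (current_.raw == &buf1_) ? &buf2_ : &buf1_;
        free_buffer->clear();
        next_.raw = free_buffer;
        return free_buffer;
      }

      void ShiftToken() {  // next_ becomes current_, as in Scanner::Next()
        current_ = next_;
        next_.raw = nullptr;
      }

     private:
      std::string buf1_, buf2_;
      TokenDesc current_, next_;
    };
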
@@ -722,6 +724,9 @@ class Scanner {
// Start position of the octal literal last scanned.
Location octal_pos_;
+ // Value of the last smi that was scanned.
+ int smi_value_;
+
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
@@ -732,16 +737,12 @@ class Scanner {
// Whether there is a multi-line comment that contains a
// line-terminator after the current token, and before the next.
bool has_multiline_comment_before_next_;
- // Whether we scan 'let' as a keyword for harmony block-scoped let bindings.
- bool harmony_scoping_;
// Whether we scan 'module', 'import', 'export' as keywords.
bool harmony_modules_;
// Whether we scan 0o777 and 0b111 as numbers.
bool harmony_numeric_literals_;
// Whether we scan 'class', 'extends', 'static' and 'super' as keywords.
bool harmony_classes_;
- // Whether we scan TEMPLATE_SPAN and TEMPLATE_TAIL
- bool harmony_templates_;
// Whether we allow \u{xxxxx}.
bool harmony_unicode_;
};
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index 74aefdb954..99d4c38297 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -18,9 +18,14 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
// Collect stack and context locals.
ZoneList<Variable*> stack_locals(scope->StackLocalCount(), zone);
ZoneList<Variable*> context_locals(scope->ContextLocalCount(), zone);
- scope->CollectStackAndContextLocals(&stack_locals, &context_locals);
+ ZoneList<Variable*> strong_mode_free_variables(0, zone);
+
+ scope->CollectStackAndContextLocals(&stack_locals, &context_locals,
+ &strong_mode_free_variables);
const int stack_local_count = stack_locals.length();
const int context_local_count = context_locals.length();
+ const int strong_mode_free_variable_count =
+ strong_mode_free_variables.length();
// Make sure we allocate the correct amount.
DCHECK(scope->StackLocalCount() == stack_local_count);
DCHECK(scope->ContextLocalCount() == context_local_count);
@@ -49,9 +54,10 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
const bool has_function_name = function_name_info != NONE;
const int parameter_count = scope->num_parameters();
- const int length = kVariablePartIndex
- + parameter_count + stack_local_count + 2 * context_local_count
- + (has_function_name ? 2 : 0);
+ const int length = kVariablePartIndex + parameter_count + stack_local_count +
+ 2 * context_local_count +
+ 3 * strong_mode_free_variable_count +
+ (has_function_name ? 2 : 0);
Factory* factory = isolate->factory();
Handle<ScopeInfo> scope_info = factory->NewScopeInfo(length);
@@ -64,11 +70,14 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
FunctionVariableMode::encode(function_variable_mode) |
AsmModuleField::encode(scope->asm_module()) |
AsmFunctionField::encode(scope->asm_function()) |
- IsSimpleParameterListField::encode(simple_parameter_list);
+ IsSimpleParameterListField::encode(simple_parameter_list) |
+ BlockScopeIsClassScopeField::encode(scope->is_class_scope()) |
+ FunctionKindField::encode(scope->function_kind());
scope_info->SetFlags(flags);
scope_info->SetParameterCount(parameter_count);
scope_info->SetStackLocalCount(stack_local_count);
scope_info->SetContextLocalCount(context_local_count);
+ scope_info->SetStrongModeFreeVariableCount(strong_mode_free_variable_count);
int index = kVariablePartIndex;
// Add parameters.
@@ -111,6 +120,25 @@ Handle<ScopeInfo> ScopeInfo::Create(Isolate* isolate, Zone* zone,
scope_info->set(index++, Smi::FromInt(value));
}
+ DCHECK(index == scope_info->StrongModeFreeVariableNameEntriesIndex());
+ for (int i = 0; i < strong_mode_free_variable_count; ++i) {
+ scope_info->set(index++, *strong_mode_free_variables[i]->name());
+ }
+
+ DCHECK(index == scope_info->StrongModeFreeVariablePositionEntriesIndex());
+ for (int i = 0; i < strong_mode_free_variable_count; ++i) {
+ // Unfortunately, the source code positions are stored as int even though
+ // int32_t would be enough (given the maximum source code length).
+ Handle<Object> start_position = factory->NewNumberFromInt(
+ static_cast<int32_t>(strong_mode_free_variables[i]
+ ->strong_mode_reference_start_position()));
+ scope_info->set(index++, *start_position);
+ Handle<Object> end_position = factory->NewNumberFromInt(
+ static_cast<int32_t>(strong_mode_free_variables[i]
+ ->strong_mode_reference_end_position()));
+ scope_info->set(index++, *end_position);
+ }
+
// If present, add the function variable name and its index.
DCHECK(index == scope_info->FunctionNameEntryIndex());
if (has_function_name) {
@@ -283,6 +311,35 @@ bool ScopeInfo::LocalIsSynthetic(int var) {
}
+String* ScopeInfo::StrongModeFreeVariableName(int var) {
+ DCHECK(0 <= var && var < StrongModeFreeVariableCount());
+ int info_index = StrongModeFreeVariableNameEntriesIndex() + var;
+ return String::cast(get(info_index));
+}
+
+
+int ScopeInfo::StrongModeFreeVariableStartPosition(int var) {
+ DCHECK(0 <= var && var < StrongModeFreeVariableCount());
+ int info_index = StrongModeFreeVariablePositionEntriesIndex() + var * 2;
+ int32_t value = 0;
+ bool ok = get(info_index)->ToInt32(&value);
+ USE(ok);
+ DCHECK(ok);
+ return value;
+}
+
+
+int ScopeInfo::StrongModeFreeVariableEndPosition(int var) {
+ DCHECK(0 <= var && var < StrongModeFreeVariableCount());
+ int info_index = StrongModeFreeVariablePositionEntriesIndex() + var * 2 + 1;
+ int32_t value = 0;
+ bool ok = get(info_index)->ToInt32(&value);
+ USE(ok);
+ DCHECK(ok);
+ return value;
+}
+
+
int ScopeInfo::StackSlotIndex(String* name) {
DCHECK(name->IsInternalizedString());
if (length() > 0) {
@@ -373,6 +430,16 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
}
+bool ScopeInfo::block_scope_is_class_scope() {
+ return BlockScopeIsClassScopeField::decode(Flags());
+}
+
+
+FunctionKind ScopeInfo::function_kind() {
+ return FunctionKindField::decode(Flags());
+}
+
+
bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
Handle<Context> context,
Handle<JSObject> scope_object) {
@@ -420,11 +487,23 @@ int ScopeInfo::ContextLocalInfoEntriesIndex() {
}
-int ScopeInfo::FunctionNameEntryIndex() {
+int ScopeInfo::StrongModeFreeVariableNameEntriesIndex() {
return ContextLocalInfoEntriesIndex() + ContextLocalCount();
}
+int ScopeInfo::StrongModeFreeVariablePositionEntriesIndex() {
+ return StrongModeFreeVariableNameEntriesIndex() +
+ StrongModeFreeVariableCount();
+}
+
+
+int ScopeInfo::FunctionNameEntryIndex() {
+ return StrongModeFreeVariablePositionEntriesIndex() +
+ 2 * StrongModeFreeVariableCount();
+}
+
+
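
All of the new accessors rely on one flat layout: after the context-local info come the strong-mode free-variable names (one slot each), then their source positions (two slots each: start, end), then the optional function name, which is why each *EntriesIndex above is simply the previous region's start plus its length. The arithmetic in isolation (plain ints and illustrative counts, no handles):

    #include <cassert>

    // Flat ScopeInfo-style layout: each index function returns
    // "previous region's start + previous region's length".
    struct Layout {
      int context_local_info_index;  // start of the region just before ours
      int context_local_count;
      int free_variable_count;

      int NameEntriesIndex() const {
        return context_local_info_index + context_local_count;
      }
      int PositionEntriesIndex() const {
        return NameEntriesIndex() + free_variable_count;  // one slot per name
      }
      int FunctionNameEntryIndex() const {
        return PositionEntriesIndex() + 2 * free_variable_count;  // (start, end)
      }
    };

    int main() {
      const Layout l = {10, 3, 2};
      assert(l.NameEntriesIndex() == 13);        // after 3 info slots
      assert(l.PositionEntriesIndex() == 15);    // after 2 names
      assert(l.FunctionNameEntryIndex() == 19);  // after 2 (start, end) pairs
    }

The 1 + 2 slots per variable are also why ScopeInfo::Create adds 3 * strong_mode_free_variable_count to the total length.
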
int ContextSlotCache::Hash(Object* data, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
@@ -560,8 +639,8 @@ Handle<ModuleInfo> ModuleInfo::Create(Isolate* isolate,
int i = 0;
for (ModuleDescriptor::Iterator it = descriptor->iterator(); !it.done();
it.Advance(), ++i) {
- Variable* var = scope->LookupLocal(it.name());
- info->set_name(i, *(it.name()->string()));
+ Variable* var = scope->LookupLocal(it.local_name());
+ info->set_name(i, *(it.export_name()->string()));
info->set_mode(i, var->mode());
DCHECK(var->index() >= 0);
info->set_index(i, var->index());
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 35449643ce..3d46d158ac 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -4,13 +4,12 @@
#include "src/v8.h"
-#include "src/scopes.h"
-
#include "src/accessors.h"
#include "src/bootstrapper.h"
-#include "src/compiler.h"
#include "src/messages.h"
+#include "src/parser.h"
#include "src/scopeinfo.h"
+#include "src/scopes.h"
namespace v8 {
namespace internal {
@@ -31,8 +30,7 @@ VariableMap::~VariableMap() {}
Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
- VariableMode mode, bool is_valid_lhs,
- Variable::Kind kind,
+ VariableMode mode, Variable::Kind kind,
InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag) {
// AstRawStrings are unambiguous, i.e., the same string is always represented
@@ -43,7 +41,7 @@ Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
if (p->value == NULL) {
// The variable has not been declared yet -> insert it.
DCHECK(p->key == name);
- p->value = new (zone()) Variable(scope, name, mode, is_valid_lhs, kind,
+ p->value = new (zone()) Variable(scope, name, mode, kind,
initialization_flag, maybe_assigned_flag);
}
return reinterpret_cast<Variable*>(p->value);
@@ -66,7 +64,7 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
// Implementation of Scope
Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
- AstValueFactory* ast_value_factory)
+ AstValueFactory* ast_value_factory, FunctionKind function_kind)
: inner_scopes_(4, zone),
variables_(zone),
internals_(4, zone),
@@ -79,7 +77,8 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
already_resolved_(false),
ast_value_factory_(ast_value_factory),
zone_(zone) {
- SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null());
+ SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null(),
+ function_kind);
// The outermost scope must be a script scope.
DCHECK(scope_type == SCRIPT_SCOPE || outer_scope != NULL);
DCHECK(!HasIllegalRedeclaration());
@@ -131,18 +130,19 @@ Scope::Scope(Zone* zone, Scope* inner_scope,
Variable* variable = variables_.Declare(this,
catch_variable_name,
VAR,
- true, // Valid left-hand side.
Variable::NORMAL,
kCreatedInitialized);
AllocateHeapSlot(variable);
}
-void Scope::SetDefaults(ScopeType scope_type,
- Scope* outer_scope,
- Handle<ScopeInfo> scope_info) {
+void Scope::SetDefaults(ScopeType scope_type, Scope* outer_scope,
+ Handle<ScopeInfo> scope_info,
+ FunctionKind function_kind) {
outer_scope_ = outer_scope;
scope_type_ = scope_type;
+ function_kind_ = function_kind;
+ block_scope_is_class_scope_ = false;
scope_name_ = ast_value_factory_->empty_string();
dynamics_ = NULL;
receiver_ = NULL;
@@ -181,6 +181,8 @@ void Scope::SetDefaults(ScopeType scope_type,
if (!scope_info.is_null()) {
scope_calls_eval_ = scope_info->CallsEval();
language_mode_ = scope_info->language_mode();
+ block_scope_is_class_scope_ = scope_info->block_scope_is_class_scope();
+ function_kind_ = scope_info->function_kind();
}
}
@@ -248,8 +250,9 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
}
-bool Scope::Analyze(CompilationInfo* info) {
+bool Scope::Analyze(ParseInfo* info) {
DCHECK(info->function() != NULL);
+ DCHECK(info->scope() == NULL);
Scope* scope = info->function()->scope();
Scope* top = scope;
@@ -263,7 +266,12 @@ bool Scope::Analyze(CompilationInfo* info) {
// Allocate the variables.
{
AstNodeFactory ast_node_factory(info->ast_value_factory());
- if (!top->AllocateVariables(info, &ast_node_factory)) return false;
+ if (!top->AllocateVariables(info, &ast_node_factory)) {
+ DCHECK(top->pending_error_handler_.has_pending_error());
+ top->pending_error_handler_.ThrowPendingError(info->isolate(),
+ info->script());
+ return false;
+ }
}
#ifdef DEBUG
@@ -274,12 +282,13 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
- info->PrepareForCompilation(scope);
+ info->set_scope(scope);
return true;
}
-void Scope::Initialize(bool subclass_constructor) {
+void Scope::Initialize() {
+ bool subclass_constructor = IsSubclassConstructor(function_kind_);
DCHECK(!already_resolved());
// Add this scope as a new inner scope of the outer scope.
@@ -302,15 +311,15 @@ void Scope::Initialize(bool subclass_constructor) {
DCHECK(!subclass_constructor || is_function_scope());
Variable* var = variables_.Declare(
this, ast_value_factory_->this_string(),
- subclass_constructor ? CONST : VAR, false, Variable::THIS,
+ subclass_constructor ? CONST : VAR, Variable::THIS,
subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
var->AllocateTo(Variable::PARAMETER, -1);
receiver_ = var;
if (subclass_constructor) {
- new_target_ = variables_.Declare(
- this, ast_value_factory_->new_target_string(), CONST, false,
- Variable::NEW_TARGET, kCreatedInitialized);
+ new_target_ =
+ variables_.Declare(this, ast_value_factory_->new_target_string(),
+ CONST, Variable::NEW_TARGET, kCreatedInitialized);
new_target_->AllocateTo(Variable::PARAMETER, -2);
new_target_->set_is_used();
}
@@ -326,7 +335,6 @@ void Scope::Initialize(bool subclass_constructor) {
variables_.Declare(this,
ast_value_factory_->arguments_string(),
VAR,
- true,
Variable::ARGUMENTS,
kCreatedInitialized);
}
@@ -402,7 +410,7 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
maybe_assigned_flag = kMaybeAssigned;
}
- Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
+ Variable* var = variables_.Declare(this, name, mode, Variable::NORMAL,
init_flag, maybe_assigned_flag);
var->AllocateTo(location, index);
return var;
@@ -418,9 +426,8 @@ Variable* Scope::LookupFunctionVar(const AstRawString* name,
VariableMode mode;
int index = scope_info_->FunctionContextSlotIndex(*(name->string()), &mode);
if (index < 0) return NULL;
- Variable* var = new(zone()) Variable(
- this, name, mode, true /* is valid LHS */,
- Variable::NORMAL, kCreatedInitialized);
+ Variable* var = new (zone())
+ Variable(this, name, mode, Variable::NORMAL, kCreatedInitialized);
VariableProxy* proxy = factory->NewVariableProxy(var);
VariableDeclaration* declaration = factory->NewVariableDeclaration(
proxy, mode, this, RelocInfo::kNoPosition);
@@ -448,7 +455,7 @@ Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode,
bool is_rest) {
DCHECK(!already_resolved());
DCHECK(is_function_scope());
- Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
+ Variable* var = variables_.Declare(this, name, mode, Variable::NORMAL,
kCreatedInitialized);
if (is_rest) {
DCHECK_NULL(rest_parameter_);
@@ -461,7 +468,7 @@ Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode,
Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
- InitializationFlag init_flag,
+ InitializationFlag init_flag, Variable::Kind kind,
MaybeAssignedFlag maybe_assigned_flag) {
DCHECK(!already_resolved());
// This function handles VAR, LET, and CONST modes. DYNAMIC variables are
@@ -469,7 +476,7 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
// explicitly, and TEMPORARY variables are allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
++num_var_or_const_;
- return variables_.Declare(this, name, mode, true, Variable::NORMAL, init_flag,
+ return variables_.Declare(this, name, mode, kind, init_flag,
maybe_assigned_flag);
}
@@ -479,7 +486,6 @@ Variable* Scope::DeclareDynamicGlobal(const AstRawString* name) {
return variables_.Declare(this,
name,
DYNAMIC_GLOBAL,
- true,
Variable::NORMAL,
kCreatedInitialized);
}
@@ -502,7 +508,6 @@ Variable* Scope::NewInternal(const AstRawString* name) {
Variable* var = new(zone()) Variable(this,
name,
INTERNAL,
- false,
Variable::NORMAL,
kCreatedInitialized);
internals_.Add(var, zone());
@@ -515,7 +520,6 @@ Variable* Scope::NewTemporary(const AstRawString* name) {
Variable* var = new(zone()) Variable(this,
name,
TEMPORARY,
- true,
Variable::NORMAL,
kCreatedInitialized);
temps_.Add(var, zone());
@@ -582,8 +586,9 @@ class VarAndOrder {
};
-void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
- ZoneList<Variable*>* context_locals) {
+void Scope::CollectStackAndContextLocals(
+ ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals,
+ ZoneList<Variable*>* strong_mode_free_variables) {
DCHECK(stack_locals != NULL);
DCHECK(context_locals != NULL);
@@ -617,6 +622,11 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
+ if (strong_mode_free_variables && var->has_strong_mode_reference() &&
+ var->mode() == DYNAMIC_GLOBAL) {
+ strong_mode_free_variables->Add(var, zone());
+ }
+
if (var->is_used()) {
vars.Add(VarAndOrder(var, p->order), zone());
}
@@ -634,7 +644,7 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
}
-bool Scope::AllocateVariables(CompilationInfo* info, AstNodeFactory* factory) {
+bool Scope::AllocateVariables(ParseInfo* info, AstNodeFactory* factory) {
// 1) Propagate scope information.
bool outer_scope_calls_sloppy_eval = false;
if (outer_scope_ != NULL) {
@@ -645,9 +655,9 @@ bool Scope::AllocateVariables(CompilationInfo* info, AstNodeFactory* factory) {
PropagateScopeInfo(outer_scope_calls_sloppy_eval);
// 2) Allocate module instances.
- if (FLAG_harmony_modules && (is_script_scope() || is_module_scope())) {
+ if (FLAG_harmony_modules && is_script_scope()) {
DCHECK(num_modules_ == 0);
- AllocateModulesRecursively(this);
+ AllocateModules();
}
// 3) Resolve variables.
@@ -768,6 +778,20 @@ void Scope::GetNestedScopeChain(Isolate* isolate,
}
+void Scope::ReportMessage(int start_position, int end_position,
+ const char* message, const AstRawString* arg) {
+ // Propagate the error to the topmost scope targeted by this scope analysis
+ // phase.
+ Scope* top = this;
+ while (!top->is_script_scope() && !top->outer_scope()->already_resolved()) {
+ top = top->outer_scope();
+ }
+
+ top->pending_error_handler_.ReportMessageAt(start_position, end_position,
+ message, arg, kReferenceError);
+}
+
+
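
ReportMessage deliberately does not throw: scope analysis may be deep inside an inner scope when it detects the violation, so the message is parked on the topmost still-unresolved scope's PendingCompilationErrorHandler, and Scope::Analyze (earlier in this diff) throws it only after AllocateVariables has returned false. A stripped-down sketch of that collect-now, throw-later pattern (invented handler, not V8's class):

    #include <string>
    #include <utility>

    // Records at most one error during an analysis phase; the caller raises
    // it once, after the whole walk has finished.
    class PendingErrorHandler {
     public:
      void Report(int start, int end, std::string message) {
        if (has_error_) return;  // this sketch keeps the first error only
        has_error_ = true;
        start_ = start;
        end_ = end;
        message_ = std::move(message);
      }
      bool has_error() const { return has_error_; }
      const std::string& message() const { return message_; }

     private:
      bool has_error_ = false;
      int start_ = -1;
      int end_ = -1;
      std::string message_;
    };

    bool Analyze(PendingErrorHandler* handler) {
      // ...walk scopes; on a violation, record instead of throwing mid-walk:
      handler->Report(12, 15, "strong_use_before_declaration");
      return !handler->has_error();  // false tells the caller to throw now
    }
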
#ifdef DEBUG
static const char* Header(ScopeType scope_type) {
switch (scope_type) {
@@ -962,7 +986,6 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
var = map->Declare(NULL,
name,
mode,
- true,
Variable::NORMAL,
init_flag);
// Allocate it by giving it a dynamic lookup.
@@ -1036,7 +1059,7 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
}
-bool Scope::ResolveVariable(CompilationInfo* info, VariableProxy* proxy,
+bool Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy,
AstNodeFactory* factory) {
DCHECK(info->script_scope()->is_script_scope());
@@ -1050,6 +1073,9 @@ bool Scope::ResolveVariable(CompilationInfo* info, VariableProxy* proxy,
switch (binding_kind) {
case BOUND:
// We found a variable binding.
+ if (is_strong(language_mode())) {
+ if (!CheckStrongModeDeclaration(proxy, var)) return false;
+ }
break;
case BOUND_EVAL_SHADOWED:
@@ -1087,13 +1113,70 @@ bool Scope::ResolveVariable(CompilationInfo* info, VariableProxy* proxy,
DCHECK(var != NULL);
if (proxy->is_assigned()) var->set_maybe_assigned();
+ if (is_strong(language_mode())) {
+ // Record that the variable is referred to from strong mode. Also, record
+ // the position.
+ var->RecordStrongModeReference(proxy->position(), proxy->end_position());
+ }
+
proxy->BindTo(var);
return true;
}
-bool Scope::ResolveVariablesRecursively(CompilationInfo* info,
+bool Scope::CheckStrongModeDeclaration(VariableProxy* proxy, Variable* var) {
+ // Check for declaration-after use (for variables) in strong mode. Note that
+ // we can only do this in the case where we have seen the declaration. And we
+ // always allow referencing functions (for now).
+
+ // Allow referencing the class name from methods of that class, even though
+ // the initializer position for class names is only after the body.
+ Scope* scope = this;
+ while (scope) {
+ if (scope->ClassVariableForMethod() == var) return true;
+ scope = scope->outer_scope();
+ }
+
+ // If both the use and the declaration are inside an eval scope (possibly
+ // indirectly), or one of them is, we need to check whether they are inside
+ // the same eval scope or different ones.
+
+ // TODO(marja,rossberg): Detect errors across different evals (depends on the
+ // future of eval in strong mode).
+ const Scope* eval_for_use = NearestOuterEvalScope();
+ const Scope* eval_for_declaration = var->scope()->NearestOuterEvalScope();
+
+ if (proxy->position() != RelocInfo::kNoPosition &&
+ proxy->position() < var->initializer_position() && !var->is_function() &&
+ eval_for_use == eval_for_declaration) {
+ DCHECK(proxy->end_position() != RelocInfo::kNoPosition);
+ ReportMessage(proxy->position(), proxy->end_position(),
+ "strong_use_before_declaration", proxy->raw_name());
+ return false;
+ }
+ return true;
+}
+
+
+Variable* Scope::ClassVariableForMethod() const {
+ if (!is_function_scope()) return nullptr;
+ if (IsInObjectLiteral(function_kind_)) return nullptr;
+ if (!IsConciseMethod(function_kind_) && !IsConstructor(function_kind_) &&
+ !IsAccessorFunction(function_kind_)) {
+ return nullptr;
+ }
+ DCHECK_NOT_NULL(outer_scope_);
+ DCHECK(outer_scope_->is_class_scope());
+ // The class scope contains at most one variable, the class name.
+ DCHECK(outer_scope_->variables_.occupancy() <= 1);
+ if (outer_scope_->variables_.occupancy() == 0) return nullptr;
+ VariableMap::Entry* p = outer_scope_->variables_.Start();
+ return reinterpret_cast<Variable*>(p->value);
+}
+
+
+bool Scope::ResolveVariablesRecursively(ParseInfo* info,
AstNodeFactory* factory) {
DCHECK(info->script_scope()->is_script_scope());
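
At its core, CheckStrongModeDeclaration above is a position comparison: a reference is an error when it textually precedes the variable's initializer, unless an exemption applies (functions, class names referenced from the class's own methods, and uses separated from the declaration by an eval boundary). Reduced to the essential test, with those exemptions omitted and illustrative structs in place of V8's types:

    const int kNoPosition = -1;

    struct Var { int initializer_position; bool is_function; };
    struct Use { int position; };

    // True when the use must be reported as "strong_use_before_declaration".
    bool IsUseBeforeDeclaration(const Use& use, const Var& var) {
      return use.position != kNoPosition &&
             use.position < var.initializer_position && !var.is_function;
    }
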
@@ -1358,19 +1441,18 @@ void Scope::AllocateVariablesRecursively(Isolate* isolate) {
}
-void Scope::AllocateModulesRecursively(Scope* host_scope) {
- if (already_resolved()) return;
- if (is_module_scope()) {
- DCHECK(module_descriptor_->IsFrozen());
- DCHECK(module_var_ == NULL);
- module_var_ =
- host_scope->NewInternal(ast_value_factory_->dot_module_string());
- ++host_scope->num_modules_;
- }
-
+void Scope::AllocateModules() {
+ DCHECK(is_script_scope());
+ DCHECK(!already_resolved());
for (int i = 0; i < inner_scopes_.length(); i++) {
- Scope* inner_scope = inner_scopes_.at(i);
- inner_scope->AllocateModulesRecursively(host_scope);
+ Scope* scope = inner_scopes_.at(i);
+ if (scope->is_module_scope()) {
+ DCHECK(!scope->already_resolved());
+ DCHECK(scope->module_descriptor_->IsFrozen());
+ DCHECK_NULL(scope->module_var_);
+ scope->module_var_ = NewInternal(ast_value_factory_->dot_module_string());
+ ++num_modules_;
+ }
}
}
@@ -1386,5 +1468,4 @@ int Scope::ContextLocalCount() const {
return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
(function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index c58d124939..cce8c3c9ea 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -6,13 +6,13 @@
#define V8_SCOPES_H_
#include "src/ast.h"
+#include "src/pending-compilation-error-handler.h"
#include "src/zone.h"
namespace v8 {
namespace internal {
-class CompilationInfo;
-
+class ParseInfo;
// A hash map to support fast variable declaration and lookup.
class VariableMap: public ZoneHashMap {
@@ -22,8 +22,7 @@ class VariableMap: public ZoneHashMap {
virtual ~VariableMap();
Variable* Declare(Scope* scope, const AstRawString* name, VariableMode mode,
- bool is_valid_lhs, Variable::Kind kind,
- InitializationFlag initialization_flag,
+ Variable::Kind kind, InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
Variable* Lookup(const AstRawString* name);
@@ -72,12 +71,13 @@ class Scope: public ZoneObject {
// Construction
Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
- AstValueFactory* value_factory);
+ AstValueFactory* value_factory,
+ FunctionKind function_kind = kNormalFunction);
// Compute top scope and allocate variables. For lazy compilation the top
// scope only contains the single lazily compiled function, so this
// doesn't re-allocate variables repeatedly.
- static bool Analyze(CompilationInfo* info);
+ static bool Analyze(ParseInfo* info);
static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
Context* context, Scope* script_scope);
@@ -87,7 +87,7 @@ class Scope: public ZoneObject {
scope_name_ = scope_name;
}
- void Initialize(bool subclass_constructor = false);
+ void Initialize();
// Checks if the block scope is redundant, i.e. it does not contain any
// block scoped declarations. In that case it is removed from the scope
@@ -130,7 +130,7 @@ class Scope: public ZoneObject {
// Declare a local variable in this scope. If the variable has been
// declared before, the previously declared variable is returned.
Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
- InitializationFlag init_flag,
+ InitializationFlag init_flag, Variable::Kind kind,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
// Declare an implicit global variable in this scope which must be a
@@ -142,12 +142,14 @@ class Scope: public ZoneObject {
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
const AstRawString* name,
- int position = RelocInfo::kNoPosition) {
+ int start_position = RelocInfo::kNoPosition,
+ int end_position = RelocInfo::kNoPosition) {
// Note that we must not share the unresolved variables with
// the same name because they may be removed selectively via
// RemoveUnresolved().
DCHECK(!already_resolved());
- VariableProxy* proxy = factory->NewVariableProxy(name, false, position);
+ VariableProxy* proxy = factory->NewVariableProxy(
+ name, Variable::NORMAL, start_position, end_position);
unresolved_.Add(proxy, zone_);
return proxy;
}
@@ -278,6 +280,13 @@ class Scope: public ZoneObject {
bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
bool is_arrow_scope() const { return scope_type_ == ARROW_SCOPE; }
+ void tag_as_class_scope() {
+ DCHECK(is_block_scope());
+ block_scope_is_class_scope_ = true;
+ }
+ bool is_class_scope() const {
+ return is_block_scope() && block_scope_is_class_scope_;
+ }
bool is_declaration_scope() const {
return is_eval_scope() || is_function_scope() ||
is_module_scope() || is_script_scope();
@@ -317,12 +326,20 @@ class Scope: public ZoneObject {
// Does any inner scope access "this".
bool inner_uses_this() const { return inner_scope_uses_this_; }
+ const Scope* NearestOuterEvalScope() const {
+ if (is_eval_scope()) return this;
+ if (outer_scope() == nullptr) return nullptr;
+ return outer_scope()->NearestOuterEvalScope();
+ }
+
// ---------------------------------------------------------------------------
// Accessors.
// The type of this scope.
ScopeType scope_type() const { return scope_type_; }
+ FunctionKind function_kind() const { return function_kind_; }
+
// The language mode of this scope.
LanguageMode language_mode() const { return language_mode_; }
@@ -397,8 +414,9 @@ class Scope: public ZoneObject {
// Collect stack and context allocated local variables in this scope. Note
// that the function variable - if present - is not collected and should be
// handled separately.
- void CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
- ZoneList<Variable*>* context_locals);
+ void CollectStackAndContextLocals(
+ ZoneList<Variable*>* stack_locals, ZoneList<Variable*>* context_locals,
+ ZoneList<Variable*>* strong_mode_free_variables = nullptr);
// Current number of var or const locals.
int num_var_or_const() { return num_var_or_const_; }
@@ -470,6 +488,10 @@ class Scope: public ZoneObject {
return params_.Contains(variables_.Lookup(name));
}
+ // Error handling.
+ void ReportMessage(int start_position, int end_position, const char* message,
+ const AstRawString* arg);
+
// ---------------------------------------------------------------------------
// Debugging.
@@ -488,6 +510,10 @@ class Scope: public ZoneObject {
// The scope type.
ScopeType scope_type_;
+ // Some block scopes are tagged as class scopes.
+ bool block_scope_is_class_scope_;
+ // If the scope is a function scope, this is the function kind.
+ FunctionKind function_kind_;
// Debugging support.
const AstRawString* scope_name_;
@@ -639,11 +665,16 @@ class Scope: public ZoneObject {
Variable* LookupRecursive(VariableProxy* proxy, BindingKind* binding_kind,
AstNodeFactory* factory);
MUST_USE_RESULT
- bool ResolveVariable(CompilationInfo* info, VariableProxy* proxy,
+ bool ResolveVariable(ParseInfo* info, VariableProxy* proxy,
AstNodeFactory* factory);
MUST_USE_RESULT
- bool ResolveVariablesRecursively(CompilationInfo* info,
- AstNodeFactory* factory);
+ bool ResolveVariablesRecursively(ParseInfo* info, AstNodeFactory* factory);
+
+ bool CheckStrongModeDeclaration(VariableProxy* proxy, Variable* var);
+
+ // If this scope is a method scope of a class, return the corresponding
+ // class variable, otherwise nullptr.
+ Variable* ClassVariableForMethod() const;
// Scope analysis.
void PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
@@ -661,7 +692,7 @@ class Scope: public ZoneObject {
void AllocateNonParameterLocal(Isolate* isolate, Variable* var);
void AllocateNonParameterLocals(Isolate* isolate);
void AllocateVariablesRecursively(Isolate* isolate);
- void AllocateModulesRecursively(Scope* host_scope);
+ void AllocateModules();
// Resolve and fill in the allocation information for all variables
  // in this scope. Must be called *after* all scopes have been
@@ -672,7 +703,7 @@ class Scope: public ZoneObject {
// parameter is the context in which eval was called. In all other
// cases the context parameter is an empty handle.
MUST_USE_RESULT
- bool AllocateVariables(CompilationInfo* info, AstNodeFactory* factory);
+ bool AllocateVariables(ParseInfo* info, AstNodeFactory* factory);
private:
// Construct a scope based on the scope info.
@@ -690,12 +721,14 @@ class Scope: public ZoneObject {
}
}
- void SetDefaults(ScopeType type,
- Scope* outer_scope,
- Handle<ScopeInfo> scope_info);
+ void SetDefaults(ScopeType type, Scope* outer_scope,
+ Handle<ScopeInfo> scope_info,
+ FunctionKind function_kind = kNormalFunction);
AstValueFactory* ast_value_factory_;
Zone* zone_;
+
+ PendingCompilationErrorHandler pending_error_handler_;
};
} } // namespace v8::internal
diff --git a/deps/v8/src/snapshot/DEPS b/deps/v8/src/snapshot/DEPS
new file mode 100644
index 0000000000..810dfd6e84
--- /dev/null
+++ b/deps/v8/src/snapshot/DEPS
@@ -0,0 +1,5 @@
+specific_include_rules = {
+ "mksnapshot\.cc": [
+ "+include/libplatform/libplatform.h",
+ ],
+}
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index bc18aebb69..79c1643fb8 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -14,8 +14,8 @@
#include "src/bootstrapper.h"
#include "src/flags.h"
#include "src/list.h"
-#include "src/natives.h"
-#include "src/serialize.h"
+#include "src/snapshot/natives.h"
+#include "src/snapshot/serialize.h"
using namespace v8;
@@ -64,17 +64,14 @@ class SnapshotWriter {
fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
fprintf(fp_, "#include \"src/v8.h\"\n");
fprintf(fp_, "#include \"src/base/platform/platform.h\"\n\n");
- fprintf(fp_, "#include \"src/snapshot.h\"\n\n");
+ fprintf(fp_, "#include \"src/snapshot/snapshot.h\"\n\n");
fprintf(fp_, "namespace v8 {\n");
fprintf(fp_, "namespace internal {\n\n");
}
void WriteFileSuffix() const {
- fprintf(fp_, "const v8::StartupData Snapshot::SnapshotBlob() {\n");
- fprintf(fp_, " v8::StartupData blob;\n");
- fprintf(fp_, " blob.data = reinterpret_cast<const char*>(blob_data);\n");
- fprintf(fp_, " blob.raw_size = blob_size;\n");
- fprintf(fp_, " return blob;\n");
+ fprintf(fp_, "const v8::StartupData* Snapshot::DefaultSnapshotBlob() {\n");
+ fprintf(fp_, " return &blob;\n");
fprintf(fp_, "}\n\n");
fprintf(fp_, "} // namespace internal\n");
fprintf(fp_, "} // namespace v8\n");
@@ -85,7 +82,8 @@ class SnapshotWriter {
WriteSnapshotData(blob);
fprintf(fp_, "};\n");
fprintf(fp_, "static const int blob_size = %d;\n", blob.length());
- fprintf(fp_, "\n");
+ fprintf(fp_, "static const v8::StartupData blob =\n");
+ fprintf(fp_, "{ (const char*) blob_data, blob_size };\n");
}
void WriteSnapshotData(const i::Vector<const i::byte>& blob) const {
@@ -140,11 +138,6 @@ char* GetExtraCode(char* filename) {
int main(int argc, char** argv) {
// By default, log code create information in the snapshot.
i::FLAG_log_code = true;
-
- // Omit from the snapshot natives for features that can be turned off
- // at runtime.
- i::FLAG_harmony_shipping = false;
-
i::FLAG_logfile_per_isolate = false;
// Print the usage if an error occurs when parsing the command line
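
The writer now emits the StartupData struct into the generated file itself, so the runtime side shrinks to returning its address (DefaultSnapshotBlob above). A sketch of a generator producing that shape; the emitted identifiers mirror the fprintf calls above, but the function and its signature are invented:

    #include <cstdio>

    // Write a C++ file embedding a binary blob as a byte array plus one
    // statically initialized StartupData pointing at it.
    void WriteEmbeddedBlob(std::FILE* fp, const unsigned char* data, int size) {
      std::fprintf(fp, "static const unsigned char blob_data[] = {\n");
      for (int i = 0; i < size; i++) {
        std::fprintf(fp, "%u,%s", data[i], (i % 12 == 11) ? "\n" : " ");
      }
      std::fprintf(fp, "};\n");
      std::fprintf(fp, "static const int blob_size = %d;\n", size);
      // One static struct; DefaultSnapshotBlob() can simply return &blob.
      std::fprintf(fp, "static const v8::StartupData blob =\n");
      std::fprintf(fp, "{ (const char*) blob_data, blob_size };\n");
    }
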
diff --git a/deps/v8/src/natives-external.cc b/deps/v8/src/snapshot/natives-external.cc
index e601808fe8..14def47409 100644
--- a/deps/v8/src/natives-external.cc
+++ b/deps/v8/src/snapshot/natives-external.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/natives.h"
+#include "src/snapshot/natives.h"
#include "src/base/logging.h"
#include "src/list.h"
#include "src/list-inl.h"
-#include "src/snapshot-source-sink.h"
+#include "src/snapshot/snapshot-source-sink.h"
#include "src/vector.h"
#ifndef V8_USE_EXTERNAL_STARTUP_DATA
@@ -132,6 +132,11 @@ class NativesHolder {
DCHECK(store);
holder_ = store;
}
+ static bool empty() { return holder_ == NULL; }
+ static void Dispose() {
+ delete holder_;
+ holder_ = NULL;
+ }
private:
static NativesStore* holder_;
@@ -141,19 +146,45 @@ template<NativeType type>
NativesStore* NativesHolder<type>::holder_ = NULL;
+// The natives blob. Memory is owned by caller.
+static StartupData* natives_blob_ = NULL;
+
+
+/**
+ * Read the Natives blob, as previously set by SetNativesFromFile.
+ */
+void ReadNatives() {
+ if (natives_blob_ && NativesHolder<CORE>::empty()) {
+ SnapshotByteSource bytes(natives_blob_->data, natives_blob_->raw_size);
+ NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes));
+ NativesHolder<EXPERIMENTAL>::set(
+ NativesStore::MakeFromScriptsSource(&bytes));
+ DCHECK(!bytes.HasMore());
+ }
+}
+
+
/**
- * Read the Natives (library sources) blob, as generated by js2c + the build
+ * Set the Natives (library sources) blob, as generated by js2c + the build
* system.
*/
void SetNativesFromFile(StartupData* natives_blob) {
+ DCHECK(!natives_blob_);
DCHECK(natives_blob);
DCHECK(natives_blob->data);
DCHECK(natives_blob->raw_size > 0);
- SnapshotByteSource bytes(natives_blob->data, natives_blob->raw_size);
- NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes));
- NativesHolder<EXPERIMENTAL>::set(NativesStore::MakeFromScriptsSource(&bytes));
- DCHECK(!bytes.HasMore());
+ natives_blob_ = natives_blob;
+ ReadNatives();
+}
+
+
+/**
+ * Release memory allocated by SetNativesFromFile.
+ */
+void DisposeNatives() {
+ NativesHolder<CORE>::Dispose();
+ NativesHolder<EXPERIMENTAL>::Dispose();
}
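
The split between SetNativesFromFile, ReadNatives and DisposeNatives follows a stash-then-decode pattern: the embedder's blob pointer is retained, decoding only happens while the holders are empty, and after DisposeNatives frees the stores, ReadNatives can rebuild them from the retained blob. The shape of it in miniature (invented Blob/Store types, not V8's):

    struct Blob { const char* data; int size; };
    struct Store { /* decoded form of the blob */ };

    static Blob* g_blob = nullptr;    // memory stays owned by the embedder
    static Store* g_store = nullptr;  // decoded lazily, at most once at a time

    void Read() {  // idempotent: only decodes while no store exists
      if (g_blob != nullptr && g_store == nullptr) {
        g_store = new Store();  // stand-in for NativesStore::MakeFromScriptsSource
      }
    }

    void SetFromFile(Blob* blob) {  // record the pointer, then decode
      g_blob = blob;
      Read();
    }

    void Dispose() {  // release what Read() allocated; Read() can redo it later
      delete g_store;
      g_store = nullptr;
    }
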
diff --git a/deps/v8/src/natives.h b/deps/v8/src/snapshot/natives.h
index 7ce7213edf..357faad1f8 100644
--- a/deps/v8/src/natives.h
+++ b/deps/v8/src/snapshot/natives.h
@@ -40,6 +40,8 @@ typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
// Used for reading the natives at runtime. Implementation in natives-empty.cc
void SetNativesFromFile(StartupData* natives_blob);
+void ReadNatives();
+void DisposeNatives();
#endif
} } // namespace v8::internal
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/snapshot/serialize.cc
index 8048f41e7b..13ac04df8f 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/snapshot/serialize.cc
@@ -9,18 +9,19 @@
#include "src/base/platform/platform.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
-#include "src/compiler.h"
+#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/global-handles.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/natives.h"
#include "src/objects.h"
+#include "src/parser.h"
#include "src/runtime/runtime.h"
-#include "src/serialize.h"
-#include "src/snapshot.h"
-#include "src/snapshot-source-sink.h"
+#include "src/snapshot/natives.h"
+#include "src/snapshot/serialize.h"
+#include "src/snapshot/snapshot.h"
+#include "src/snapshot/snapshot-source-sink.h"
#include "src/v8threads.h"
#include "src/version.h"
@@ -31,20 +32,6 @@ namespace internal {
// -----------------------------------------------------------------------------
// Coding of external references.
-// The encoding of an external reference. The type is in the high word.
-// The id is in the low word.
-static uint32_t EncodeExternal(TypeCode type, uint16_t id) {
- return static_cast<uint32_t>(type) << 16 | id;
-}
-
-
-static int* GetInternalPointer(StatsCounter* counter) {
- // All counters refer to dummy_counter, if deserializing happens without
- // setting up counters.
- static int dummy_counter = 0;
- return counter->Enabled() ? counter->GetInternalPointer() : &dummy_counter;
-}
-
ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
ExternalReferenceTable* external_reference_table =
@@ -57,63 +44,7 @@ ExternalReferenceTable* ExternalReferenceTable::instance(Isolate* isolate) {
}
-void ExternalReferenceTable::AddFromId(TypeCode type,
- uint16_t id,
- const char* name,
- Isolate* isolate) {
- Address address;
- switch (type) {
- case C_BUILTIN: {
- ExternalReference ref(static_cast<Builtins::CFunctionId>(id), isolate);
- address = ref.address();
- break;
- }
- case BUILTIN: {
- ExternalReference ref(static_cast<Builtins::Name>(id), isolate);
- address = ref.address();
- break;
- }
- case RUNTIME_FUNCTION: {
- ExternalReference ref(static_cast<Runtime::FunctionId>(id), isolate);
- address = ref.address();
- break;
- }
- case IC_UTILITY: {
- ExternalReference ref(IC_Utility(static_cast<IC::UtilityId>(id)),
- isolate);
- address = ref.address();
- break;
- }
- default:
- UNREACHABLE();
- return;
- }
- Add(address, type, id, name);
-}
-
-
-void ExternalReferenceTable::Add(Address address,
- TypeCode type,
- uint16_t id,
- const char* name) {
- DCHECK_NOT_NULL(address);
- ExternalReferenceEntry entry;
- entry.address = address;
- entry.code = EncodeExternal(type, id);
- entry.name = name;
- DCHECK_NE(0u, entry.code);
- // Assert that the code is added in ascending order to rule out duplicates.
- DCHECK((size() == 0) || (code(size() - 1) < entry.code));
- refs_.Add(entry);
- if (id > max_id_[type]) max_id_[type] = id;
-}
-
-
-void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
- for (int type_code = 0; type_code < kTypeCodeCount; type_code++) {
- max_id_[type_code] = 0;
- }
-
+ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
// Miscellaneous
Add(ExternalReference::roots_array_start(isolate).address(),
"Heap::roots_array_start()");
@@ -171,17 +102,9 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
"date_cache_stamp");
Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
"address_of_pending_message_obj");
- Add(ExternalReference::address_of_has_pending_message(isolate).address(),
- "address_of_has_pending_message");
- Add(ExternalReference::address_of_pending_message_script(isolate).address(),
- "pending_message_script");
Add(ExternalReference::get_make_code_young_function(isolate).address(),
"Code::MakeCodeYoung");
Add(ExternalReference::cpu_features().address(), "cpu_features");
- Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
- "Runtime::AllocateInNewSpace");
- Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
- "Runtime::AllocateInTargetSpace");
Add(ExternalReference::old_pointer_space_allocation_top_address(isolate)
.address(),
"Heap::OldPointerSpaceAllocationTopAddress");
@@ -217,9 +140,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
"double_constants.minus_one_half");
Add(ExternalReference::stress_deopt_count(isolate).address(),
"Isolate::stress_deopt_count_address()");
- Add(ExternalReference::incremental_marking_record_write_function(isolate)
- .address(),
- "IncrementalMarking::RecordWriteFromCode");
// Debug addresses
Add(ExternalReference::debug_after_break_target_address(isolate).address(),
@@ -259,143 +179,150 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
// new references.
struct RefTableEntry {
- TypeCode type;
uint16_t id;
const char* name;
};
- static const RefTableEntry ref_table[] = {
- // Builtins
-#define DEF_ENTRY_C(name, ignored) \
- { C_BUILTIN, \
- Builtins::c_##name, \
- "Builtins::" #name },
-
- BUILTIN_LIST_C(DEF_ENTRY_C)
+ static const RefTableEntry c_builtins[] = {
+#define DEF_ENTRY_C(name, ignored) \
+ { Builtins::c_##name, "Builtins::" #name } \
+ ,
+ BUILTIN_LIST_C(DEF_ENTRY_C)
#undef DEF_ENTRY_C
+ };
-#define DEF_ENTRY_C(name, ignored) \
- { BUILTIN, \
- Builtins::k##name, \
- "Builtins::" #name },
-#define DEF_ENTRY_A(name, kind, state, extra) DEF_ENTRY_C(name, ignored)
-
- BUILTIN_LIST_C(DEF_ENTRY_C)
- BUILTIN_LIST_A(DEF_ENTRY_A)
- BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
+ for (unsigned i = 0; i < arraysize(c_builtins); ++i) {
+ ExternalReference ref(static_cast<Builtins::CFunctionId>(c_builtins[i].id),
+ isolate);
+ Add(ref.address(), c_builtins[i].name);
+ }
+
+ static const RefTableEntry builtins[] = {
+#define DEF_ENTRY_C(name, ignored) \
+ { Builtins::k##name, "Builtins::" #name } \
+ ,
+#define DEF_ENTRY_A(name, i1, i2, i3) \
+ { Builtins::k##name, "Builtins::" #name } \
+ ,
+ BUILTIN_LIST_C(DEF_ENTRY_C) BUILTIN_LIST_A(DEF_ENTRY_A)
+ BUILTIN_LIST_DEBUG_A(DEF_ENTRY_A)
#undef DEF_ENTRY_C
#undef DEF_ENTRY_A
+ };
- // Runtime functions
-#define RUNTIME_ENTRY(name, nargs, ressize) \
- { RUNTIME_FUNCTION, \
- Runtime::k##name, \
- "Runtime::" #name },
+ for (unsigned i = 0; i < arraysize(builtins); ++i) {
+ ExternalReference ref(static_cast<Builtins::Name>(builtins[i].id), isolate);
+ Add(ref.address(), builtins[i].name);
+ }
- RUNTIME_FUNCTION_LIST(RUNTIME_ENTRY)
- INLINE_OPTIMIZED_FUNCTION_LIST(RUNTIME_ENTRY)
+ static const RefTableEntry runtime_functions[] = {
+#define RUNTIME_ENTRY(name, i1, i2) \
+ { Runtime::k##name, "Runtime::" #name } \
+ ,
+ FOR_EACH_INTRINSIC(RUNTIME_ENTRY)
#undef RUNTIME_ENTRY
+ };
-#define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \
- { RUNTIME_FUNCTION, \
- Runtime::kInlineOptimized##name, \
- "Runtime::" #name },
-
- INLINE_OPTIMIZED_FUNCTION_LIST(INLINE_OPTIMIZED_ENTRY)
-#undef INLINE_OPTIMIZED_ENTRY
-
- // IC utilities
-#define IC_ENTRY(name) \
- { IC_UTILITY, \
- IC::k##name, \
- "IC::" #name },
+ for (unsigned i = 0; i < arraysize(runtime_functions); ++i) {
+ ExternalReference ref(
+ static_cast<Runtime::FunctionId>(runtime_functions[i].id), isolate);
+ Add(ref.address(), runtime_functions[i].name);
+ }
- IC_UTIL_LIST(IC_ENTRY)
+ static const RefTableEntry inline_caches[] = {
+#define IC_ENTRY(name) \
+ { IC::k##name, "IC::" #name } \
+ ,
+ IC_UTIL_LIST(IC_ENTRY)
#undef IC_ENTRY
- }; // end of ref_table[].
+ };
- for (size_t i = 0; i < arraysize(ref_table); ++i) {
- AddFromId(ref_table[i].type,
- ref_table[i].id,
- ref_table[i].name,
- isolate);
+ for (unsigned i = 0; i < arraysize(inline_caches); ++i) {
+ ExternalReference ref(
+ IC_Utility(static_cast<IC::UtilityId>(inline_caches[i].id)), isolate);
+ Add(ref.address(), inline_caches[i].name);
}
// Stat counters
struct StatsRefTableEntry {
StatsCounter* (Counters::*counter)();
- uint16_t id;
const char* name;
};
- const StatsRefTableEntry stats_ref_table[] = {
-#define COUNTER_ENTRY(name, caption) \
- { &Counters::name, \
- Counters::k_##name, \
- "Counters::" #name },
-
- STATS_COUNTER_LIST_1(COUNTER_ENTRY)
- STATS_COUNTER_LIST_2(COUNTER_ENTRY)
+ static const StatsRefTableEntry stats_ref_table[] = {
+#define COUNTER_ENTRY(name, caption) \
+ { &Counters::name, "Counters::" #name } \
+ ,
+ STATS_COUNTER_LIST_1(COUNTER_ENTRY) STATS_COUNTER_LIST_2(COUNTER_ENTRY)
#undef COUNTER_ENTRY
- }; // end of stats_ref_table[].
+ };
Counters* counters = isolate->counters();
- for (size_t i = 0; i < arraysize(stats_ref_table); ++i) {
- Add(reinterpret_cast<Address>(GetInternalPointer(
- (counters->*(stats_ref_table[i].counter))())),
- STATS_COUNTER,
- stats_ref_table[i].id,
- stats_ref_table[i].name);
+ for (unsigned i = 0; i < arraysize(stats_ref_table); ++i) {
+ // To make sure the indices are not dependent on whether counters are
+ // enabled, use a dummy address as filler.
+ Address address = NotAvailable();
+ StatsCounter* counter = (counters->*(stats_ref_table[i].counter))();
+ if (counter->Enabled()) {
+ address = reinterpret_cast<Address>(counter->GetInternalPointer());
+ }
+ Add(address, stats_ref_table[i].name);
}
// Top addresses
-
- const char* AddressNames[] = {
-#define BUILD_NAME_LITERAL(CamelName, hacker_name) \
- "Isolate::" #hacker_name "_address",
- FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL)
- NULL
+ static const char* address_names[] = {
+#define BUILD_NAME_LITERAL(Name, name) "Isolate::" #name "_address",
+ FOR_EACH_ISOLATE_ADDRESS_NAME(BUILD_NAME_LITERAL) NULL
#undef BUILD_NAME_LITERAL
};
- for (uint16_t i = 0; i < Isolate::kIsolateAddressCount; ++i) {
- Add(isolate->get_address_from_id((Isolate::AddressId)i),
- TOP_ADDRESS, i, AddressNames[i]);
+ for (int i = 0; i < Isolate::kIsolateAddressCount; ++i) {
+ Add(isolate->get_address_from_id(static_cast<Isolate::AddressId>(i)),
+ address_names[i]);
}
// Accessors
-#define ACCESSOR_INFO_DECLARATION(name) \
- Add(FUNCTION_ADDR(&Accessors::name##Getter), ACCESSOR_CODE, \
- Accessors::k##name##Getter, "Accessors::" #name "Getter"); \
- Add(FUNCTION_ADDR(&Accessors::name##Setter), ACCESSOR_CODE, \
- Accessors::k##name##Setter, "Accessors::" #name "Setter");
- ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
+ struct AccessorRefTable {
+ Address address;
+ const char* name;
+ };
+
+ static const AccessorRefTable accessors[] = {
+#define ACCESSOR_INFO_DECLARATION(name) \
+ { FUNCTION_ADDR(&Accessors::name##Getter), "Accessors::" #name "Getter" } \
+ , {FUNCTION_ADDR(&Accessors::name##Setter), "Accessors::" #name "Setter"},
+ ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
+ };
+
+ for (unsigned i = 0; i < arraysize(accessors); ++i) {
+ Add(accessors[i].address, accessors[i].name);
+ }
StubCache* stub_cache = isolate->stub_cache();
// Stub cache tables
Add(stub_cache->key_reference(StubCache::kPrimary).address(),
- STUB_CACHE_TABLE, 1, "StubCache::primary_->key");
+ "StubCache::primary_->key");
Add(stub_cache->value_reference(StubCache::kPrimary).address(),
- STUB_CACHE_TABLE, 2, "StubCache::primary_->value");
+ "StubCache::primary_->value");
Add(stub_cache->map_reference(StubCache::kPrimary).address(),
- STUB_CACHE_TABLE, 3, "StubCache::primary_->map");
+ "StubCache::primary_->map");
Add(stub_cache->key_reference(StubCache::kSecondary).address(),
- STUB_CACHE_TABLE, 4, "StubCache::secondary_->key");
+ "StubCache::secondary_->key");
Add(stub_cache->value_reference(StubCache::kSecondary).address(),
- STUB_CACHE_TABLE, 5, "StubCache::secondary_->value");
+ "StubCache::secondary_->value");
Add(stub_cache->map_reference(StubCache::kSecondary).address(),
- STUB_CACHE_TABLE, 6, "StubCache::secondary_->map");
+ "StubCache::secondary_->map");
// Runtime entries
Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
- RUNTIME_ENTRY, 1, "HandleScope::DeleteExtensions");
+ "HandleScope::DeleteExtensions");
Add(ExternalReference::incremental_marking_record_write_function(isolate)
.address(),
- RUNTIME_ENTRY, 2, "IncrementalMarking::RecordWrite");
+ "IncrementalMarking::RecordWrite");
Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
- RUNTIME_ENTRY, 3, "StoreBuffer::StoreBufferOverflow");
+ "StoreBuffer::StoreBufferOverflow");
// Add a small set of deopt entry addresses to the encoder without generating
// the deopt table code, which isn't possible at deserialization time.
@@ -406,94 +333,69 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, entry, "lazy_deopt");
+ Add(address, "lazy_deopt");
}
}
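
For orientation, a minimal sketch of the table shape after this change: entries are appended in a fixed order, and an entry's position doubles as its encoding (illustrative stand-in types, not the patch's code):

    #include <vector>

    struct Entry {
      const void* address;
      const char* name;
    };

    // Illustrative: with the TypeCode/id machinery gone, an external
    // reference's encoding is simply its insertion position, which makes the
    // population order above part of the snapshot format.
    class TableSketch {
     public:
      void Add(const void* address, const char* name) {
        refs_.push_back({address, name});  // index == encoding
      }
      int size() const { return static_cast<int>(refs_.size()); }
      const void* address(int i) const { return refs_[i].address; }
      const char* name(int i) const { return refs_[i].name; }

     private:
      std::vector<Entry> refs_;
    };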
-ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate)
- : encodings_(HashMap::PointersMatch),
- isolate_(isolate) {
- ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance(isolate_);
- for (int i = 0; i < external_references->size(); ++i) {
- Put(external_references->address(i), i);
+ExternalReferenceEncoder::ExternalReferenceEncoder(Isolate* isolate) {
+ map_ = isolate->external_reference_map();
+ if (map_ != NULL) return;
+ map_ = new HashMap(HashMap::PointersMatch);
+ ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
+ for (int i = 0; i < table->size(); ++i) {
+ Address addr = table->address(i);
+ if (addr == ExternalReferenceTable::NotAvailable()) continue;
+ // We expect no duplicate external reference entries in the table.
+ DCHECK_NULL(map_->Lookup(addr, Hash(addr), false));
+ map_->Lookup(addr, Hash(addr), true)->value = reinterpret_cast<void*>(i);
}
+ isolate->set_external_reference_map(map_);
}
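
The constructor above inverts that flat list once per isolate; a hedged sketch of the same scheme, with std::unordered_map standing in for V8's HashMap:

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Illustrative only: build address -> index once, share it per isolate,
    // and skip NotAvailable() fillers so they never receive an encoding.
    class EncoderSketch {
     public:
      explicit EncoderSketch(const std::vector<const void*>& table) {
        for (uint32_t i = 0; i < table.size(); ++i) {
          if (table[i] == nullptr) continue;  // NotAvailable() filler
          map_.emplace(table[i], i);          // a duplicate would be a bug
        }
      }
      uint32_t Encode(const void* address) const { return map_.at(address); }

     private:
      std::unordered_map<const void*, uint32_t> map_;
    };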
-uint32_t ExternalReferenceEncoder::Encode(Address key) const {
- int index = IndexOf(key);
- DCHECK(key == NULL || index >= 0);
- return index >= 0 ?
- ExternalReferenceTable::instance(isolate_)->code(index) : 0;
-}
-
-
-const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
- int index = IndexOf(key);
- return index >= 0 ? ExternalReferenceTable::instance(isolate_)->name(index)
- : "<unknown>";
-}
-
-
-int ExternalReferenceEncoder::IndexOf(Address key) const {
- if (key == NULL) return -1;
+uint32_t ExternalReferenceEncoder::Encode(Address address) const {
+ DCHECK_NOT_NULL(address);
HashMap::Entry* entry =
- const_cast<HashMap&>(encodings_).Lookup(key, Hash(key), false);
- return entry == NULL
- ? -1
- : static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
+ const_cast<HashMap*>(map_)->Lookup(address, Hash(address), false);
+ DCHECK_NOT_NULL(entry);
+ return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
}
-void ExternalReferenceEncoder::Put(Address key, int index) {
- HashMap::Entry* entry = encodings_.Lookup(key, Hash(key), true);
- entry->value = reinterpret_cast<void*>(index);
-}
-
-
-ExternalReferenceDecoder::ExternalReferenceDecoder(Isolate* isolate)
- : encodings_(NewArray<Address*>(kTypeCodeCount)),
- isolate_(isolate) {
- ExternalReferenceTable* external_references =
- ExternalReferenceTable::instance(isolate_);
- for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
- int max = external_references->max_id(type) + 1;
- encodings_[type] = NewArray<Address>(max + 1);
- }
- for (int i = 0; i < external_references->size(); ++i) {
- Put(external_references->code(i), external_references->address(i));
- }
-}
-
-
-ExternalReferenceDecoder::~ExternalReferenceDecoder() {
- for (int type = kFirstTypeCode; type < kTypeCodeCount; ++type) {
- DeleteArray(encodings_[type]);
- }
- DeleteArray(encodings_);
+const char* ExternalReferenceEncoder::NameOfAddress(Isolate* isolate,
+ Address address) const {
+ HashMap::Entry* entry =
+ const_cast<HashMap*>(map_)->Lookup(address, Hash(address), false);
+ if (entry == NULL) return "<unknown>";
+ uint32_t i = static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
+ return ExternalReferenceTable::instance(isolate)->name(i);
}
RootIndexMap::RootIndexMap(Isolate* isolate) {
+ map_ = isolate->root_index_map();
+ if (map_ != NULL) return;
map_ = new HashMap(HashMap::PointersMatch);
Object** root_array = isolate->heap()->roots_array_start();
- for (int i = 0; i < Heap::kStrongRootListLength; i++) {
- Object* root = root_array[i];
- if (root->IsHeapObject() && !isolate->heap()->InNewSpace(root)) {
+ for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
+ Object* root = root_array[root_index];
+ // Omit root entries that can be written after initialization. They must
+ // not be referenced through the root list in the snapshot.
+ if (root->IsHeapObject() &&
+ isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
- if (LookupEntry(map_, heap_object, false) != NULL) {
- // Some root values are initialized to the empty FixedArray();
- // Do not add them to the map.
- // TODO(yangguo): This assert is not true. Some roots like
- // instanceof_cache_answer can be e.g. null.
- // DCHECK_EQ(isolate->heap()->empty_fixed_array(), heap_object);
+ HashMap::Entry* entry = LookupEntry(map_, heap_object, false);
+ if (entry != NULL) {
+ // Some are initialized to a previous value in the root list.
+ DCHECK_LT(GetValue(entry), i);
} else {
SetValue(LookupEntry(map_, heap_object, true), i);
}
}
}
+ isolate->set_root_index_map(map_);
}
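
A compressed sketch of the rule the loop above applies, with plain containers in place of V8's types (illustrative):

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Only roots that stay constant after initialization are recorded; when
    // one object occupies several root slots, emplace() keeps the first
    // (lowest) index, matching the DCHECK_LT above.
    std::unordered_map<const void*, uint32_t> BuildRootIndexMap(
        const std::vector<const void*>& roots,
        bool (*root_can_be_constant)(uint32_t index)) {
      std::unordered_map<const void*, uint32_t> map;
      for (uint32_t i = 0; i < roots.size(); ++i) {
        if (root_can_be_constant(i)) map.emplace(roots[i], i);
      }
      return map;
    }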
@@ -613,9 +515,7 @@ void Deserializer::DecodeReservation(
DCHECK_EQ(0, reservations_[NEW_SPACE].length());
STATIC_ASSERT(NEW_SPACE == 0);
int current_space = NEW_SPACE;
- for (int i = 0; i < res.length(); i++) {
- SerializedData::Reservation r(0);
- memcpy(&r, res.start() + i, sizeof(r));
+ for (auto& r : res) {
reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
if (r.is_last()) current_space++;
}
@@ -651,8 +551,10 @@ void Deserializer::Initialize(Isolate* isolate) {
DCHECK_NULL(isolate_);
DCHECK_NOT_NULL(isolate);
isolate_ = isolate;
- DCHECK_NULL(external_reference_decoder_);
- external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
+ DCHECK_NULL(external_reference_table_);
+ external_reference_table_ = ExternalReferenceTable::instance(isolate);
+ CHECK_EQ(magic_number_,
+ SerializedData::ComputeMagicNumber(external_reference_table_));
}
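
The CHECK_EQ above is the new compatibility gate between a snapshot and the binary that deserializes it; a worked example with an illustrative table size:

    #include <cstdint>

    // Same formula as SerializedData::ComputeMagicNumber in serialize.h below.
    uint32_t ComputeMagicNumber(uint32_t external_reference_count) {
      return 0xC0DE0000 ^ external_reference_count;
    }
    // e.g. a table of 730 entries yields 0xC0DE02DA, so adding or removing a
    // single external reference invalidates every previously built snapshot.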
@@ -672,6 +574,8 @@ void Deserializer::Deserialize(Isolate* isolate) {
isolate_->heap()->undefined_value());
isolate_->heap()->set_array_buffers_list(
isolate_->heap()->undefined_value());
+ isolate->heap()->set_new_array_buffer_views_list(
+ isolate_->heap()->undefined_value());
// The allocation site list is built during root iteration, but if no sites
// were encountered then it needs to be initialized to undefined.
@@ -748,10 +652,6 @@ MaybeHandle<SharedFunctionInfo> Deserializer::DeserializeCode(
Deserializer::~Deserializer() {
// TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
// DCHECK(source_.AtEOF());
- if (external_reference_decoder_) {
- delete external_reference_decoder_;
- external_reference_decoder_ = NULL;
- }
attached_objects_.Dispose();
}
@@ -897,27 +797,13 @@ void Deserializer::ReadObject(int space_number, Object** write_back) {
#ifdef DEBUG
if (obj->IsCode()) {
DCHECK(space_number == CODE_SPACE || space_number == LO_SPACE);
+#ifdef VERIFY_HEAP
+ obj->ObjectVerify();
+#endif // VERIFY_HEAP
} else {
DCHECK(space_number != CODE_SPACE);
}
-#endif
-#if V8_TARGET_ARCH_PPC && \
- (ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL)
- // If we're on a platform that uses function descriptors
- // these jump tables make use of RelocInfo::INTERNAL_REFERENCE.
- // As the V8 serialization code doesn't handle that relocation type
- // we use this to fix up code that has function descriptors.
- if (space_number == CODE_SPACE) {
- Code* code = reinterpret_cast<Code*>(HeapObject::FromAddress(address));
- for (RelocIterator it(code); !it.done(); it.next()) {
- RelocInfo::Mode rmode = it.rinfo()->rmode();
- if (rmode == RelocInfo::INTERNAL_REFERENCE) {
- Assembler::RelocateInternalReference(it.rinfo()->pc(), 0,
- code->instruction_start());
- }
- }
- }
-#endif
+#endif // DEBUG
}
@@ -963,18 +849,16 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
// Write barrier support costs around 1% in startup time. In fact there
// are no new space objects in current boot snapshots, so it's not needed,
// but that may change.
- bool write_barrier_needed = (current_object_address != NULL &&
- source_space != NEW_SPACE &&
- source_space != CELL_SPACE &&
- source_space != PROPERTY_CELL_SPACE &&
- source_space != CODE_SPACE &&
- source_space != OLD_DATA_SPACE);
+ bool write_barrier_needed =
+ (current_object_address != NULL && source_space != NEW_SPACE &&
+ source_space != CELL_SPACE && source_space != CODE_SPACE &&
+ source_space != OLD_DATA_SPACE);
while (current < limit) {
byte data = source_.Get();
switch (data) {
#define CASE_STATEMENT(where, how, within, space_number) \
case where + how + within + space_number: \
- STATIC_ASSERT((where & ~kPointedToMask) == 0); \
+ STATIC_ASSERT((where & ~kWhereMask) == 0); \
STATIC_ASSERT((how & ~kHowToCodeMask) == 0); \
STATIC_ASSERT((within & ~kWhereToPointMask) == 0); \
STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
@@ -993,25 +877,37 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
Object* new_object = NULL; /* May not be a real Object pointer. */ \
if (where == kNewObject) { \
ReadObject(space_number, &new_object); \
+ } else if (where == kBackref) { \
+ emit_write_barrier = (space_number == NEW_SPACE); \
+ new_object = GetBackReferencedObject(data & kSpaceMask); \
+ } else if (where == kBackrefWithSkip) { \
+ int skip = source_.GetInt(); \
+ current = reinterpret_cast<Object**>( \
+ reinterpret_cast<Address>(current) + skip); \
+ emit_write_barrier = (space_number == NEW_SPACE); \
+ new_object = GetBackReferencedObject(data & kSpaceMask); \
} else if (where == kRootArray) { \
int root_id = source_.GetInt(); \
new_object = isolate->heap()->roots_array_start()[root_id]; \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else if (where == kPartialSnapshotCache) { \
int cache_index = source_.GetInt(); \
- new_object = isolate->serialize_partial_snapshot_cache()[cache_index]; \
+ new_object = isolate->partial_snapshot_cache()->at(cache_index); \
emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
} else if (where == kExternalReference) { \
int skip = source_.GetInt(); \
current = reinterpret_cast<Object**>( \
reinterpret_cast<Address>(current) + skip); \
int reference_id = source_.GetInt(); \
- Address address = external_reference_decoder_->Decode(reference_id); \
+ Address address = external_reference_table_->address(reference_id); \
new_object = reinterpret_cast<Object*>(address); \
- } else if (where == kBackref) { \
- emit_write_barrier = (space_number == NEW_SPACE); \
- new_object = GetBackReferencedObject(data & kSpaceMask); \
- } else if (where == kBuiltin) { \
+ } else if (where == kAttachedReference) { \
+ int index = source_.GetInt(); \
+ DCHECK(deserializing_user_code() || index == kGlobalProxyReference); \
+ new_object = *attached_objects_[index]; \
+ emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
+ } else { \
+ DCHECK(where == kBuiltin); \
DCHECK(deserializing_user_code()); \
int builtin_id = source_.GetInt(); \
DCHECK_LE(0, builtin_id); \
@@ -1019,18 +915,6 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
Builtins::Name name = static_cast<Builtins::Name>(builtin_id); \
new_object = isolate->builtins()->builtin(name); \
emit_write_barrier = false; \
- } else if (where == kAttachedReference) { \
- int index = source_.GetInt(); \
- DCHECK(deserializing_user_code() || index == kGlobalProxyReference); \
- new_object = *attached_objects_[index]; \
- emit_write_barrier = isolate->heap()->InNewSpace(new_object); \
- } else { \
- DCHECK(where == kBackrefWithSkip); \
- int skip = source_.GetInt(); \
- current = reinterpret_cast<Object**>( \
- reinterpret_cast<Address>(current) + skip); \
- emit_write_barrier = (space_number == NEW_SPACE); \
- new_object = GetBackReferencedObject(data & kSpaceMask); \
} \
if (within == kInnerPointer) { \
if (space_number != CODE_SPACE || new_object->IsCode()) { \
@@ -1069,18 +953,17 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
}
// This generates a case and a body for the new space (which has to do extra
-// write barrier handling) and handles the other spaces with 8 fall-through
-// cases and one body.
-#define ALL_SPACES(where, how, within) \
- CASE_STATEMENT(where, how, within, NEW_SPACE) \
- CASE_BODY(where, how, within, NEW_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
- CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
- CASE_STATEMENT(where, how, within, CODE_SPACE) \
- CASE_STATEMENT(where, how, within, MAP_SPACE) \
- CASE_STATEMENT(where, how, within, CELL_SPACE) \
- CASE_STATEMENT(where, how, within, PROPERTY_CELL_SPACE) \
- CASE_STATEMENT(where, how, within, LO_SPACE) \
+// write barrier handling) and handles the other spaces with fall-through cases
+// and one body.
+#define ALL_SPACES(where, how, within) \
+ CASE_STATEMENT(where, how, within, NEW_SPACE) \
+ CASE_BODY(where, how, within, NEW_SPACE) \
+ CASE_STATEMENT(where, how, within, OLD_DATA_SPACE) \
+ CASE_STATEMENT(where, how, within, OLD_POINTER_SPACE) \
+ CASE_STATEMENT(where, how, within, CODE_SPACE) \
+ CASE_STATEMENT(where, how, within, MAP_SPACE) \
+ CASE_STATEMENT(where, how, within, CELL_SPACE) \
+ CASE_STATEMENT(where, how, within, LO_SPACE) \
CASE_BODY(where, how, within, kAnyOldSpace)
#define FOUR_CASES(byte_code) \
@@ -1095,106 +978,6 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
FOUR_CASES(byte_code + 8) \
FOUR_CASES(byte_code + 12)
-#define COMMON_RAW_LENGTHS(f) \
- f(1) \
- f(2) \
- f(3) \
- f(4) \
- f(5) \
- f(6) \
- f(7) \
- f(8) \
- f(9) \
- f(10) \
- f(11) \
- f(12) \
- f(13) \
- f(14) \
- f(15) \
- f(16) \
- f(17) \
- f(18) \
- f(19) \
- f(20) \
- f(21) \
- f(22) \
- f(23) \
- f(24) \
- f(25) \
- f(26) \
- f(27) \
- f(28) \
- f(29) \
- f(30) \
- f(31)
-
- // We generate 15 cases and bodies that process special tags that combine
- // the raw data tag and the length into one byte.
-#define RAW_CASE(index) \
- case kRawData + index: { \
- byte* raw_data_out = reinterpret_cast<byte*>(current); \
- source_.CopyRaw(raw_data_out, index* kPointerSize); \
- current = reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
- break; \
- }
- COMMON_RAW_LENGTHS(RAW_CASE)
-#undef RAW_CASE
-
- // Deserialize a chunk of raw data that doesn't have one of the popular
- // lengths.
- case kRawData: {
- int size = source_.GetInt();
- byte* raw_data_out = reinterpret_cast<byte*>(current);
- source_.CopyRaw(raw_data_out, size);
- break;
- }
-
- SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance)
- SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) {
- int root_id = RootArrayConstantFromByteCode(data);
- Object* object = isolate->heap()->roots_array_start()[root_id];
- DCHECK(!isolate->heap()->InNewSpace(object));
- UnalignedCopy(current++, &object);
- break;
- }
-
- SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance)
- SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) {
- int root_id = RootArrayConstantFromByteCode(data);
- int skip = source_.GetInt();
- current = reinterpret_cast<Object**>(
- reinterpret_cast<intptr_t>(current) + skip);
- Object* object = isolate->heap()->roots_array_start()[root_id];
- DCHECK(!isolate->heap()->InNewSpace(object));
- UnalignedCopy(current++, &object);
- break;
- }
-
- case kVariableRepeat: {
- int repeats = source_.GetInt();
- Object* object = current[-1];
- DCHECK(!isolate->heap()->InNewSpace(object));
- for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
- break;
- }
-
- STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
- Heap::kOldSpaceRoots);
- STATIC_ASSERT(kMaxFixedRepeats == 15);
- FOUR_CASES(kFixedRepeat)
- FOUR_CASES(kFixedRepeat + 4)
- FOUR_CASES(kFixedRepeat + 8)
- case kFixedRepeat + 12:
- case kFixedRepeat + 13:
- case kFixedRepeat + 14: {
- int repeats = RepeatsForCode(data);
- Object* object;
- UnalignedCopy(&object, current - 1);
- DCHECK(!isolate->heap()->InNewSpace(object));
- for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
- break;
- }
-
// Deserialize a new object and write a pointer to it to the current
// object.
ALL_SPACES(kNewObject, kPlain, kStartOfObject)
@@ -1244,38 +1027,19 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
// Find an object in the partial snapshot cache and write a pointer to it
// to the current object.
CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
- CASE_BODY(kPartialSnapshotCache,
- kPlain,
- kStartOfObject,
- 0)
+ CASE_BODY(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
// Find a code entry in the partial snapshot cache and
// write a pointer to it to the current object.
CASE_STATEMENT(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
- CASE_BODY(kPartialSnapshotCache,
- kPlain,
- kInnerPointer,
- 0)
+ CASE_BODY(kPartialSnapshotCache, kPlain, kInnerPointer, 0)
// Find an external reference and write a pointer to it to the current
// object.
CASE_STATEMENT(kExternalReference, kPlain, kStartOfObject, 0)
- CASE_BODY(kExternalReference,
- kPlain,
- kStartOfObject,
- 0)
+ CASE_BODY(kExternalReference, kPlain, kStartOfObject, 0)
// Find an external reference and write a pointer to it in the current
// code object.
CASE_STATEMENT(kExternalReference, kFromCode, kStartOfObject, 0)
- CASE_BODY(kExternalReference,
- kFromCode,
- kStartOfObject,
- 0)
- // Find a builtin and write a pointer to it to the current object.
- CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0)
- CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0)
- CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0)
- CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0)
- CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0)
- CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0)
+ CASE_BODY(kExternalReference, kFromCode, kStartOfObject, 0)
// Find an object in the attached references and write a pointer to it to
// the current object.
CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0)
@@ -1284,6 +1048,13 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
CASE_BODY(kAttachedReference, kPlain, kInnerPointer, 0)
CASE_STATEMENT(kAttachedReference, kFromCode, kInnerPointer, 0)
CASE_BODY(kAttachedReference, kFromCode, kInnerPointer, 0)
+ // Find a builtin and write a pointer to it to the current object.
+ CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0)
+ CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0)
+ CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0)
+ CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0)
+ CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0)
+ CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0)
#undef CASE_STATEMENT
#undef CASE_BODY
@@ -1296,18 +1067,28 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
break;
}
- case kNativesStringResource: {
- DCHECK(!isolate_->heap()->deserialization_complete());
- int index = source_.Get();
- Vector<const char> source_vector = Natives::GetScriptSource(index);
- NativesExternalStringResource* resource =
- new NativesExternalStringResource(source_vector.start(),
- source_vector.length());
- Object* resource_obj = reinterpret_cast<Object*>(resource);
- UnalignedCopy(current++, &resource_obj);
+ case kInternalReferenceEncoded:
+ case kInternalReference: {
+ // An internal reference address is not encoded via a skip, but as an
+ // offset from the code entry.
+ int pc_offset = source_.GetInt();
+ int target_offset = source_.GetInt();
+ Code* code =
+ Code::cast(HeapObject::FromAddress(current_object_address));
+ DCHECK(0 <= pc_offset && pc_offset <= code->instruction_size());
+ DCHECK(0 <= target_offset && target_offset <= code->instruction_size());
+ Address pc = code->entry() + pc_offset;
+ Address target = code->entry() + target_offset;
+ Assembler::deserialization_set_target_internal_reference_at(
+ pc, target, data == kInternalReference
+ ? RelocInfo::INTERNAL_REFERENCE
+ : RelocInfo::INTERNAL_REFERENCE_ENCODED);
break;
}
+ case kNop:
+ break;
+
case kNextChunk: {
int space = source_.Get();
DCHECK(space < kNumberOfPreallocatedSpaces);
@@ -1322,6 +1103,60 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
break;
}
+ case kSynchronize:
+ // Reaching this case indicates a mismatch between the number of GC
+ // roots at serialization time and at deserialization time.
+ CHECK(false);
+ break;
+
+ case kNativesStringResource: {
+ DCHECK(!isolate_->heap()->deserialization_complete());
+ int index = source_.Get();
+ Vector<const char> source_vector = Natives::GetScriptSource(index);
+ NativesExternalStringResource* resource =
+ new NativesExternalStringResource(source_vector.start(),
+ source_vector.length());
+ Object* resource_obj = reinterpret_cast<Object*>(resource);
+ UnalignedCopy(current++, &resource_obj);
+ break;
+ }
+
+ // Deserialize raw data of variable length.
+ case kVariableRawData: {
+ int size_in_bytes = source_.GetInt();
+ byte* raw_data_out = reinterpret_cast<byte*>(current);
+ source_.CopyRaw(raw_data_out, size_in_bytes);
+ break;
+ }
+
+ case kVariableRepeat: {
+ int repeats = source_.GetInt();
+ Object* object = current[-1];
+ DCHECK(!isolate->heap()->InNewSpace(object));
+ for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
+ break;
+ }
+
+ STATIC_ASSERT(kNumberOfRootArrayConstants == Heap::kOldSpaceRoots);
+ STATIC_ASSERT(kNumberOfRootArrayConstants == 32);
+ SIXTEEN_CASES(kRootArrayConstantsWithSkip)
+ SIXTEEN_CASES(kRootArrayConstantsWithSkip + 16) {
+ int skip = source_.GetInt();
+ current = reinterpret_cast<Object**>(
+ reinterpret_cast<intptr_t>(current) + skip);
+ // Fall through.
+ }
+
+ SIXTEEN_CASES(kRootArrayConstants)
+ SIXTEEN_CASES(kRootArrayConstants + 16) {
+ int root_id = data & kRootArrayConstantsMask;
+ Object* object = isolate->heap()->roots_array_start()[root_id];
+ DCHECK(!isolate->heap()->InNewSpace(object));
+ UnalignedCopy(current++, &object);
+ break;
+ }
+
+ STATIC_ASSERT(kNumberOfHotObjects == 8);
FOUR_CASES(kHotObjectWithSkip)
FOUR_CASES(kHotObjectWithSkip + 4) {
int skip = source_.GetInt();
@@ -1329,9 +1164,10 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
reinterpret_cast<Address>(current) + skip);
// Fall through.
}
+
FOUR_CASES(kHotObject)
FOUR_CASES(kHotObject + 4) {
- int index = data & kHotObjectIndexMask;
+ int index = data & kHotObjectMask;
Object* hot_object = hot_objects_.Get(index);
UnalignedCopy(current, &hot_object);
if (write_barrier_needed && isolate->heap()->InNewSpace(hot_object)) {
@@ -1344,12 +1180,30 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
break;
}
- case kSynchronize: {
- // If we get here then that indicates that you have a mismatch between
- // the number of GC roots when serializing and deserializing.
- CHECK(false);
+ // Deserialize raw data of fixed length from 1 to 32 words.
+ STATIC_ASSERT(kNumberOfFixedRawData == 32);
+ SIXTEEN_CASES(kFixedRawData)
+ SIXTEEN_CASES(kFixedRawData + 16) {
+ byte* raw_data_out = reinterpret_cast<byte*>(current);
+ int size_in_bytes = (data - kFixedRawDataStart) << kPointerSizeLog2;
+ source_.CopyRaw(raw_data_out, size_in_bytes);
+ current = reinterpret_cast<Object**>(raw_data_out + size_in_bytes);
+ break;
+ }
+
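+ // Worked example (illustrative): with kFixedRawData == 0xd0 and
+ // kFixedRawDataStart == 0xcf, byte code 0xd3 copies 4 words, i.e.
+ // 32 bytes on a 64-bit target where kPointerSizeLog2 == 3.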
+ STATIC_ASSERT(kNumberOfFixedRepeat == 16);
+ SIXTEEN_CASES(kFixedRepeat) {
+ int repeats = data - kFixedRepeatStart;
+ Object* object;
+ UnalignedCopy(&object, current - 1);
+ DCHECK(!isolate->heap()->InNewSpace(object));
+ for (int i = 0; i < repeats; i++) UnalignedCopy(current++, &object);
+ break;
}
+#undef SIXTEEN_CASES
+#undef FOUR_CASES
+
default:
CHECK(false);
}
@@ -1361,7 +1215,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
: isolate_(isolate),
sink_(sink),
- external_reference_encoder_(new ExternalReferenceEncoder(isolate)),
+ external_reference_encoder_(isolate),
root_index_map_(isolate),
code_address_map_(NULL),
large_objects_total_size_(0),
@@ -1377,7 +1231,6 @@ Serializer::Serializer(Isolate* isolate, SnapshotByteSink* sink)
Serializer::~Serializer() {
- delete external_reference_encoder_;
if (code_address_map_ != NULL) delete code_address_map_;
}
@@ -1509,43 +1362,35 @@ void Serializer::EncodeReservations(
void SerializerDeserializer::Iterate(Isolate* isolate,
ObjectVisitor* visitor) {
if (isolate->serializer_enabled()) return;
- for (int i = 0; ; i++) {
- if (isolate->serialize_partial_snapshot_cache_length() <= i) {
- // Extend the array ready to get a value from the visitor when
- // deserializing.
- isolate->PushToPartialSnapshotCache(Smi::FromInt(0));
- }
- Object** cache = isolate->serialize_partial_snapshot_cache();
- visitor->VisitPointers(&cache[i], &cache[i + 1]);
+ List<Object*>* cache = isolate->partial_snapshot_cache();
+ for (int i = 0;; ++i) {
+ // Extend the array ready to get a value when deserializing.
+ if (cache->length() <= i) cache->Add(Smi::FromInt(0));
+ visitor->VisitPointer(&cache->at(i));
// The sentinel is the undefined object, which is a root, so it will not
// normally be found in the cache.
- if (cache[i] == isolate->heap()->undefined_value()) {
- break;
- }
+ if (cache->at(i)->IsUndefined()) break;
}
}
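
A minimal sketch of the traversal contract above: the cache grows on demand while deserializing and is terminated by the undefined sentinel, which is a root and therefore never appears as an ordinary cache entry (illustrative types):

    #include <cstddef>
    #include <vector>

    template <typename Object, typename Visitor>
    void IterateCacheSketch(std::vector<Object*>* cache, Object* undefined,
                            Object* placeholder, Visitor visit) {
      for (size_t i = 0;; ++i) {
        // Extend the array ready to get a value when deserializing.
        if (cache->size() <= i) cache->push_back(placeholder);
        visit(&(*cache)[i]);                  // may replace the placeholder
        if ((*cache)[i] == undefined) break;  // sentinel ends the cache
      }
    }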
int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
Isolate* isolate = this->isolate();
-
- for (int i = 0;
- i < isolate->serialize_partial_snapshot_cache_length();
- i++) {
- Object* entry = isolate->serialize_partial_snapshot_cache()[i];
- if (entry == heap_object) return i;
+ List<Object*>* cache = isolate->partial_snapshot_cache();
+ int new_index = cache->length();
+
+ int index = partial_cache_index_map_.LookupOrInsert(heap_object, new_index);
+ if (index == PartialCacheIndexMap::kInvalidIndex) {
+ // We didn't find the object in the cache. So we add it to the cache and
+ // then visit the pointer so that it becomes part of the startup snapshot
+ // and we can refer to it from the partial snapshot.
+ cache->Add(heap_object);
+ startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
+ // We don't recurse from the startup snapshot generator into the partial
+ // snapshot generator.
+ return new_index;
}
-
- // We didn't find the object in the cache. So we add it to the cache and
- // then visit the pointer so that it becomes part of the startup snapshot
- // and we can refer to it from the partial snapshot.
- int length = isolate->serialize_partial_snapshot_cache_length();
- isolate->PushToPartialSnapshotCache(heap_object);
- startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
- // We don't recurse from the startup snapshot generator into the partial
- // snapshot generator.
- DCHECK(length == isolate->serialize_partial_snapshot_cache_length() - 1);
- return length;
+ return index;
}
@@ -1575,7 +1420,7 @@ bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
// Encode a reference to a hot object by its index in the working set.
int index = hot_objects_.Find(obj);
if (index != HotObjectsList::kNotFound) {
- DCHECK(index >= 0 && index <= kMaxHotObjectIndex);
+ DCHECK(index >= 0 && index < kNumberOfHotObjects);
if (FLAG_trace_serializer) {
PrintF(" Encoding hot object %d:", index);
obj->ShortPrint();
@@ -1647,6 +1492,10 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
return;
}
+ if (obj->IsCode() && Code::cast(obj)->kind() == Code::FUNCTION) {
+ obj = isolate()->builtins()->builtin(Builtins::kCompileLazy);
+ }
+
if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
FlushSkip(skip);
@@ -1682,16 +1531,13 @@ void Serializer::PutRoot(int root_index,
PrintF("\n");
}
- if (how_to_code == kPlain &&
- where_to_point == kStartOfObject &&
- root_index < kRootArrayNumberOfConstantEncodings &&
+ if (how_to_code == kPlain && where_to_point == kStartOfObject &&
+ root_index < kNumberOfRootArrayConstants &&
!isolate()->heap()->InNewSpace(object)) {
if (skip == 0) {
- sink_->Put(kRootArrayConstants + kNoSkipDistance + root_index,
- "RootConstant");
+ sink_->Put(kRootArrayConstants + root_index, "RootConstant");
} else {
- sink_->Put(kRootArrayConstants + kHasSkipDistance + root_index,
- "RootConstant");
+ sink_->Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
sink_->PutInt(skip, "SkipInPutRoot");
}
} else {
@@ -1842,7 +1688,7 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
// Output raw data header. Do not bother with common raw length cases here.
- sink_->Put(kRawData, "RawDataForString");
+ sink_->Put(kVariableRawData, "RawDataForString");
sink_->PutInt(bytes_to_output, "length");
// Serialize string header (except for map).
@@ -1897,7 +1743,9 @@ void Serializer::ObjectSerializer::Serialize() {
int size = object_->Size();
Map* map = object_->map();
- SerializePrologue(Serializer::SpaceOfObject(object_), size, map);
+ AllocationSpace space =
+ MemoryChunk::FromAddress(object_->address())->owner()->identity();
+ SerializePrologue(space, size, map);
// Serialize the rest of the object.
CHECK_EQ(0, bytes_processed_so_far_);
@@ -1931,11 +1779,11 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
}
current += repeat_count;
bytes_processed_so_far_ += repeat_count * kPointerSize;
- if (repeat_count > kMaxFixedRepeats) {
- sink_->Put(kVariableRepeat, "SerializeRepeats");
- sink_->PutInt(repeat_count, "SerializeRepeats");
+ if (repeat_count > kNumberOfFixedRepeat) {
+ sink_->Put(kVariableRepeat, "VariableRepeat");
+ sink_->PutInt(repeat_count, "repeat count");
} else {
- sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
+ sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
}
} else {
serializer_->SerializeObject(
@@ -1979,12 +1827,37 @@ void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
sink_->PutInt(skip, "SkipB4ExternalRef");
- Address target = rinfo->target_reference();
+ Address target = rinfo->target_external_reference();
sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
bytes_processed_so_far_ += rinfo->target_address_size();
}
+void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
+ // We can only refer to internal references of code that has been output.
+ DCHECK(is_code_object_ && code_has_been_output_);
+ // We do not use a skip from the last patched pc to find the pc to patch, since
+ // target_address_address may not return addresses in ascending order when
+ // used for internal references. External references may be stored at the
+ // end of the code in the constant pool, whereas internal references are
+ // inline. That would cause the skip to be negative. Instead, we store the
+ // offset from code entry.
+ Address entry = Code::cast(object_)->entry();
+ intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
+ intptr_t target_offset = rinfo->target_internal_reference() - entry;
+ DCHECK(0 <= pc_offset &&
+ pc_offset <= Code::cast(object_)->instruction_size());
+ DCHECK(0 <= target_offset &&
+ target_offset <= Code::cast(object_)->instruction_size());
+ sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
+ ? kInternalReference
+ : kInternalReferenceEncoded,
+ "InternalRef");
+ sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
+ sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
+}
+
+
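
A hedged sketch of the offset scheme described in the comment above; the entry, pc, and target values are made up for illustration:

    #include <cstdint>

    struct InternalRef {
      intptr_t pc_offset;      // where the reference is stored, from entry
      intptr_t target_offset;  // what it points at, from entry
    };

    // Offsets from the code entry are position-independent, unlike pc skips,
    // so they survive the code object moving during deserialization.
    InternalRef EncodeRef(uintptr_t entry, uintptr_t pc, uintptr_t target) {
      return {static_cast<intptr_t>(pc - entry),
              static_cast<intptr_t>(target - entry)};
    }

    uintptr_t DecodeTarget(uintptr_t new_entry, const InternalRef& ref) {
      return new_entry + ref.target_offset;
    }
    // e.g. entry 0x1000, pc 0x1040, target 0x1100 encode as {0x40, 0x100};
    // if the code is materialized at 0x7000, the target becomes 0x7100.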
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
@@ -2053,24 +1926,29 @@ void Serializer::ObjectSerializer::VisitExternalOneByteString(
}
-static Code* CloneCodeObject(HeapObject* code) {
- Address copy = new byte[code->Size()];
- MemCopy(copy, code->address(), code->Size());
- return Code::cast(HeapObject::FromAddress(copy));
-}
-
-
-static void WipeOutRelocations(Code* code) {
- int mode_mask =
- RelocInfo::kCodeTargetMask |
- RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
- RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
- RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+Address Serializer::ObjectSerializer::PrepareCode() {
+ // To make snapshots reproducible, we make a copy of the code object
+ // and wipe all pointers in the copy, which we then serialize.
+ Code* original = Code::cast(object_);
+ Code* code = serializer_->CopyCode(original);
+ // Code age headers are not serializable.
+ code->MakeYoung(serializer_->isolate());
+ int mode_mask = RelocInfo::kCodeTargetMask |
+ RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+ RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
- if (!(FLAG_enable_ool_constant_pool && it.rinfo()->IsInConstantPool())) {
- it.rinfo()->WipeOut();
+ RelocInfo* rinfo = it.rinfo();
+ if (!(FLAG_enable_ool_constant_pool && rinfo->IsInConstantPool())) {
+ rinfo->WipeOut();
}
}
+ // We need to wipe out the header fields *after* wiping out the
+ // relocations, because some of these fields are needed for the latter.
+ code->WipeOutHeader();
+ return code->address();
}
@@ -2086,47 +1964,33 @@ int Serializer::ObjectSerializer::OutputRawData(
// locations in a non-ascending order. Luckily that doesn't happen.
DCHECK(to_skip >= 0);
bool outputting_code = false;
- if (to_skip != 0 && code_object_ && !code_has_been_output_) {
+ if (to_skip != 0 && is_code_object_ && !code_has_been_output_) {
// Output the code all at once and fix later.
bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
outputting_code = true;
code_has_been_output_ = true;
}
- if (bytes_to_output != 0 &&
- (!code_object_ || outputting_code)) {
-#define RAW_CASE(index) \
- if (!outputting_code && bytes_to_output == index * kPointerSize && \
- index * kPointerSize == to_skip) { \
- sink_->PutSection(kRawData + index, "RawDataFixed"); \
- to_skip = 0; /* This insn already skips. */ \
- } else /* NOLINT */
- COMMON_RAW_LENGTHS(RAW_CASE)
-#undef RAW_CASE
- { /* NOLINT */
+ if (bytes_to_output != 0 && (!is_code_object_ || outputting_code)) {
+ if (!outputting_code && bytes_to_output == to_skip &&
+ IsAligned(bytes_to_output, kPointerAlignment) &&
+ bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
+ int size_in_words = bytes_to_output >> kPointerSizeLog2;
+ sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
+ to_skip = 0; // This instruction includes skip.
+ } else {
// We always end up here if we are outputting the code of a code object.
- sink_->Put(kRawData, "RawData");
+ sink_->Put(kVariableRawData, "VariableRawData");
sink_->PutInt(bytes_to_output, "length");
}
- // To make snapshots reproducible, we need to wipe out all pointers in code.
- if (code_object_) {
- Code* code = CloneCodeObject(object_);
- // Code age headers are not serializable.
- code->MakeYoung(serializer_->isolate());
- WipeOutRelocations(code);
- // We need to wipe out the header fields *after* wiping out the
- // relocations, because some of these fields are needed for the latter.
- code->WipeOutHeader();
- object_start = code->address();
- }
+ if (is_code_object_) object_start = PrepareCode();
- const char* description = code_object_ ? "Code" : "Byte";
+ const char* description = is_code_object_ ? "Code" : "Byte";
#ifdef MEMORY_SANITIZER
// Object sizes are usually rounded up with uninitialized padding space.
MSAN_MEMORY_IS_INITIALIZED(object_start + base, bytes_to_output);
#endif // MEMORY_SANITIZER
sink_->PutRaw(object_start + base, bytes_to_output, description);
- if (code_object_) delete[] object_start;
}
if (to_skip != 0 && return_skip == kIgnoringReturn) {
sink_->Put(kSkip, "Skip");
@@ -2137,19 +2001,6 @@ int Serializer::ObjectSerializer::OutputRawData(
}
-AllocationSpace Serializer::SpaceOfObject(HeapObject* object) {
- for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
- AllocationSpace s = static_cast<AllocationSpace>(i);
- if (object->GetHeap()->InSpace(object, s)) {
- DCHECK(i < kNumberOfSpaces);
- return s;
- }
- }
- UNREACHABLE();
- return FIRST_SPACE;
-}
-
-
BackReference Serializer::AllocateLargeObject(int size) {
// Large objects are allocated one-by-one when deserializing. We do not
// have to keep track of multiple chunks.
@@ -2198,6 +2049,14 @@ void Serializer::InitializeCodeAddressMap() {
}
+Code* Serializer::CopyCode(Code* code) {
+ code_buffer_.Rewind(0); // Clear buffer without deleting backing store.
+ int size = code->CodeSize();
+ code_buffer_.AddAll(Vector<byte>(code->address(), size));
+ return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
+}
+
+
ScriptData* CodeSerializer::Serialize(Isolate* isolate,
Handle<SharedFunctionInfo> info,
Handle<String> source) {
@@ -2400,7 +2259,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
HandleScope scope(isolate);
SmartPointer<SerializedCodeData> scd(
- SerializedCodeData::FromCachedData(cached_data, *source));
+ SerializedCodeData::FromCachedData(isolate, cached_data, *source));
if (scd.is_empty()) {
if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
DCHECK(cached_data->rejected());
@@ -2450,7 +2309,6 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
isolate->logger()->CodeCreateEvent(Logger::SCRIPT_TAG, result->code(),
*result, NULL, name);
}
-
return scope.CloseAndEscape(result);
}
@@ -2478,6 +2336,7 @@ SnapshotData::SnapshotData(const Serializer& ser) {
AllocateData(size);
// Set header values.
+ SetMagicNumber(ser.isolate());
SetHeaderValue(kCheckSumOffset, Version::Hash());
SetHeaderValue(kNumReservationsOffset, reservations.length());
SetHeaderValue(kPayloadLengthOffset, payload.length());
@@ -2568,7 +2427,7 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
AllocateData(size);
// Set header values.
- SetHeaderValue(kMagicNumberOffset, kMagicNumber);
+ SetMagicNumber(cs.isolate());
SetHeaderValue(kVersionHashOffset, Version::Hash());
SetHeaderValue(kSourceHashOffset, SourceHash(cs.source()));
SetHeaderValue(kCpuFeaturesOffset,
@@ -2600,15 +2459,15 @@ SerializedCodeData::SerializedCodeData(const List<byte>& payload,
SerializedCodeData::SanityCheckResult SerializedCodeData::SanityCheck(
- String* source) const {
- uint32_t magic_number = GetHeaderValue(kMagicNumberOffset);
+ Isolate* isolate, String* source) const {
+ uint32_t magic_number = GetMagicNumber();
uint32_t version_hash = GetHeaderValue(kVersionHashOffset);
uint32_t source_hash = GetHeaderValue(kSourceHashOffset);
uint32_t cpu_features = GetHeaderValue(kCpuFeaturesOffset);
uint32_t flags_hash = GetHeaderValue(kFlagHashOffset);
uint32_t c1 = GetHeaderValue(kChecksum1Offset);
uint32_t c2 = GetHeaderValue(kChecksum2Offset);
- if (magic_number != kMagicNumber) return MAGIC_NUMBER_MISMATCH;
+ if (magic_number != ComputeMagicNumber(isolate)) return MAGIC_NUMBER_MISMATCH;
if (version_hash != Version::Hash()) return VERSION_MISMATCH;
if (source_hash != SourceHash(source)) return SOURCE_MISMATCH;
if (cpu_features != static_cast<uint32_t>(CpuFeatures::SupportedFeatures())) {
@@ -2668,11 +2527,12 @@ SerializedCodeData::SerializedCodeData(ScriptData* data)
: SerializedData(const_cast<byte*>(data->data()), data->length()) {}
-SerializedCodeData* SerializedCodeData::FromCachedData(ScriptData* cached_data,
+SerializedCodeData* SerializedCodeData::FromCachedData(Isolate* isolate,
+ ScriptData* cached_data,
String* source) {
DisallowHeapAllocation no_gc;
SerializedCodeData* scd = new SerializedCodeData(cached_data);
- SanityCheckResult r = scd->SanityCheck(source);
+ SanityCheckResult r = scd->SanityCheck(isolate, source);
if (r == CHECK_SUCCESS) return scd;
cached_data->Reject();
source->GetIsolate()->counters()->code_cache_reject_reason()->AddSample(r);
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/snapshot/serialize.h
index b76abbcbac..41c4286e2b 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/snapshot/serialize.h
@@ -8,37 +8,14 @@
#include "src/hashmap.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
-#include "src/snapshot-source-sink.h"
+#include "src/snapshot/snapshot-source-sink.h"
namespace v8 {
namespace internal {
class ScriptData;
-// A TypeCode is used to distinguish different kinds of external reference.
-// It is a single bit to make testing for types easy.
-enum TypeCode {
- UNCLASSIFIED, // One-of-a-kind references.
- C_BUILTIN,
- BUILTIN,
- RUNTIME_FUNCTION,
- IC_UTILITY,
- STATS_COUNTER,
- TOP_ADDRESS,
- ACCESSOR_CODE,
- STUB_CACHE_TABLE,
- RUNTIME_ENTRY,
- LAZY_DEOPTIMIZATION
-};
-
-const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1;
-const int kFirstTypeCode = UNCLASSIFIED;
-
-const int kReferenceIdBits = 16;
-const int kReferenceIdMask = (1 << kReferenceIdBits) - 1;
-const int kReferenceTypeShift = kReferenceIdBits;
-
-const int kDeoptTableSerializeEntryCount = 64;
+static const int kDeoptTableSerializeEntryCount = 64;
// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
@@ -47,46 +24,28 @@ class ExternalReferenceTable {
public:
static ExternalReferenceTable* instance(Isolate* isolate);
- ~ExternalReferenceTable() { }
-
int size() const { return refs_.length(); }
-
Address address(int i) { return refs_[i].address; }
-
- uint32_t code(int i) { return refs_[i].code; }
-
const char* name(int i) { return refs_[i].name; }
- int max_id(int code) { return max_id_[code]; }
+ inline static Address NotAvailable() { return NULL; }
private:
- explicit ExternalReferenceTable(Isolate* isolate) : refs_(64) {
- PopulateTable(isolate);
- }
-
struct ExternalReferenceEntry {
Address address;
- uint32_t code;
const char* name;
};
- void PopulateTable(Isolate* isolate);
-
- // For a few types of references, we can get their address from their id.
- void AddFromId(TypeCode type,
- uint16_t id,
- const char* name,
- Isolate* isolate);
-
- // For other types of references, the caller will figure out the address.
- void Add(Address address, TypeCode type, uint16_t id, const char* name);
+ explicit ExternalReferenceTable(Isolate* isolate);
void Add(Address address, const char* name) {
- Add(address, UNCLASSIFIED, ++max_id_[UNCLASSIFIED], name);
+ ExternalReferenceEntry entry = {address, name};
+ refs_.Add(entry);
}
List<ExternalReferenceEntry> refs_;
- uint16_t max_id_[kTypeCodeCount];
+
+ DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};
@@ -96,47 +55,17 @@ class ExternalReferenceEncoder {
uint32_t Encode(Address key) const;
- const char* NameOfAddress(Address key) const;
+ const char* NameOfAddress(Isolate* isolate, Address address) const;
private:
- HashMap encodings_;
static uint32_t Hash(Address key) {
- return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >> 2);
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
+ kPointerSizeLog2);
}
- int IndexOf(Address key) const;
-
- void Put(Address key, int index);
-
- Isolate* isolate_;
-};
-
-
-class ExternalReferenceDecoder {
- public:
- explicit ExternalReferenceDecoder(Isolate* isolate);
- ~ExternalReferenceDecoder();
-
- Address Decode(uint32_t key) const {
- if (key == 0) return NULL;
- return *Lookup(key);
- }
-
- private:
- Address** encodings_;
-
- Address* Lookup(uint32_t key) const {
- int type = key >> kReferenceTypeShift;
- DCHECK(kFirstTypeCode <= type && type < kTypeCodeCount);
- int id = key & kReferenceIdMask;
- return &encodings_[type][id];
- }
-
- void Put(uint32_t key, Address value) {
- *Lookup(key) = value;
- }
+ HashMap* map_;
- Isolate* isolate_;
+ DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
};
@@ -150,8 +79,8 @@ class AddressMapBase {
return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry->value));
}
- static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
- bool insert) {
+ inline static HashMap::Entry* LookupEntry(HashMap* map, HeapObject* obj,
+ bool insert) {
return map->Lookup(Key(obj), Hash(obj), insert);
}
@@ -170,9 +99,8 @@ class RootIndexMap : public AddressMapBase {
public:
explicit RootIndexMap(Isolate* isolate);
- ~RootIndexMap() { delete map_; }
-
static const int kInvalidRootIndex = -1;
+
int Lookup(HeapObject* obj) {
HashMap::Entry* entry = LookupEntry(map_, obj, false);
if (entry) return GetValue(entry);
@@ -186,6 +114,28 @@ class RootIndexMap : public AddressMapBase {
};
+class PartialCacheIndexMap : public AddressMapBase {
+ public:
+ PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}
+
+ static const int kInvalidIndex = -1;
+
+ // Look up the object in the map. Return its index if found, or create
+ // a new entry with new_index as value, and return kInvalidIndex.
+ int LookupOrInsert(HeapObject* obj, int new_index) {
+ HashMap::Entry* entry = LookupEntry(&map_, obj, false);
+ if (entry != NULL) return GetValue(entry);
+ SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
+ return kInvalidIndex;
+ }
+
+ private:
+ HashMap map_;
+
+ DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
+};
+
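
The LookupOrInsert contract above is easy to misread: the first sighting of an object installs new_index but reports kInvalidIndex, signalling the caller to append the object to the cache. A self-contained sketch of the same semantics (illustrative, using std::unordered_map):

    #include <unordered_map>

    class IndexMapSketch {
     public:
      static constexpr int kInvalidIndex = -1;
      // Returns the remembered index, or installs new_index and returns
      // kInvalidIndex on first sight, exactly as PartialCacheIndexMap does.
      int LookupOrInsert(const void* obj, int new_index) {
        auto it = map_.find(obj);
        if (it != map_.end()) return it->second;
        map_[obj] = new_index;
        return kInvalidIndex;
      }

     private:
      std::unordered_map<const void*, int> map_;
    };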
+
class BackReference {
public:
explicit BackReference(uint32_t bitfield) : bitfield_(bitfield) {}
@@ -353,113 +303,116 @@ class SerializerDeserializer: public ObjectVisitor {
static const int kNumberOfSpaces = LAST_SPACE + 1;
protected:
+ // ---------- byte code range 0x00..0x7f ----------
+ // Byte codes in this range represent Where, HowToCode and WhereToPoint.
// Where the pointed-to object can be found:
enum Where {
- kNewObject = 0, // Object is next in snapshot.
- // 1-7 One per space.
- // 0x8 Unused.
- kRootArray = 0x9, // Object is found in root array.
- kPartialSnapshotCache = 0xa, // Object is in the cache.
- kExternalReference = 0xb, // Pointer to an external reference.
- kSkip = 0xc, // Skip n bytes.
- kBuiltin = 0xd, // Builtin code object.
- kAttachedReference = 0xe, // Object is described in an attached list.
- // 0xf Used by misc. See below.
- kBackref = 0x10, // Object is described relative to end.
- // 0x11-0x17 One per space.
- kBackrefWithSkip = 0x18, // Object is described relative to end.
- // 0x19-0x1f One per space.
- // 0x20-0x3f Used by misc. See below.
- kPointedToMask = 0x3f
+ // 0x00..0x05 Allocate new object in the specified space.
+ kNewObject = 0,
+ // 0x06 Unused (including 0x26, 0x46, 0x66).
+ // 0x07 Unused (including 0x27, 0x47, 0x67).
+ // 0x08..0x0d Reference to previous object from space.
+ kBackref = 0x08,
+ // 0x0e Unused (including 0x2e, 0x4e, 0x6e).
+ // 0x0f Unused (including 0x2f, 0x4f, 0x6f).
+ // 0x10..0x15 Reference to previous object from space after skip.
+ kBackrefWithSkip = 0x10,
+ // 0x16 Unused (including 0x36, 0x56, 0x76).
+ // 0x17 Unused (including 0x37, 0x57, 0x77).
+ // 0x18 Root array item.
+ kRootArray = 0x18,
+ // 0x19 Object in the partial snapshot cache.
+ kPartialSnapshotCache = 0x19,
+ // 0x1a External reference referenced by id.
+ kExternalReference = 0x1a,
+ // 0x1b Object provided in the attached list.
+ kAttachedReference = 0x1b,
+ // 0x1c Builtin code referenced by index.
+ kBuiltin = 0x1c
+ // 0x1d..0x1f Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
};
+ static const int kWhereMask = 0x1f;
+ static const int kSpaceMask = 7;
+ STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
+
// How to code the pointer to the object.
enum HowToCode {
- kPlain = 0, // Straight pointer.
- // What this means depends on the architecture:
- kFromCode = 0x40, // A pointer inlined in code.
- kHowToCodeMask = 0x40
+ // Straight pointer.
+ kPlain = 0,
+ // A pointer inlined in code. What this means depends on the architecture.
+ kFromCode = 0x20
};
- // For kRootArrayConstants
- enum WithSkip {
- kNoSkipDistance = 0,
- kHasSkipDistance = 0x40,
- kWithSkipMask = 0x40
- };
+ static const int kHowToCodeMask = 0x20;
// Where to point within the object.
enum WhereToPoint {
+ // Points to start of object.
kStartOfObject = 0,
- kInnerPointer = 0x80, // First insn in code object or payload of cell.
- kWhereToPointMask = 0x80
+ // Points to instruction in code object or payload of cell.
+ kInnerPointer = 0x40
};
- // Misc.
- // Raw data to be copied from the snapshot. This byte code does not advance
- // the current pointer, which is used for code objects, where we write the
- // entire code in one memcpy, then fix up stuff with kSkip and other byte
- // codes that overwrite data.
- static const int kRawData = 0x20;
- // Some common raw lengths: 0x21-0x3f.
- // These autoadvance the current pointer.
- static const int kOnePointerRawData = 0x21;
-
- static const int kVariableRepeat = 0x60;
- // 0x61-0x6f Repeat last word
- static const int kFixedRepeat = 0x61;
- static const int kFixedRepeatBase = kFixedRepeat - 1;
- static const int kLastFixedRepeat = 0x6f;
- static const int kMaxFixedRepeats = kLastFixedRepeat - kFixedRepeatBase;
- static int CodeForRepeats(int repeats) {
- DCHECK(repeats >= 1 && repeats <= kMaxFixedRepeats);
- return kFixedRepeatBase + repeats;
- }
- static int RepeatsForCode(int byte_code) {
- DCHECK(byte_code > kFixedRepeatBase && byte_code <= kLastFixedRepeat);
- return byte_code - kFixedRepeatBase;
- }
-
- // Hot objects are a small set of recently seen or back-referenced objects.
- // They are represented by a single opcode to save space.
- // We use 0x70..0x77 for 8 hot objects, and 0x78..0x7f to add skip.
- static const int kHotObject = 0x70;
- static const int kMaxHotObjectIndex = 0x77 - kHotObject;
- static const int kHotObjectWithSkip = 0x78;
- STATIC_ASSERT(HotObjectsList::kSize == kMaxHotObjectIndex + 1);
- STATIC_ASSERT(0x7f - kHotObjectWithSkip == kMaxHotObjectIndex);
- static const int kHotObjectIndexMask = 0x7;
-
- static const int kRootArrayConstants = 0xa0;
- // 0xa0-0xbf Things from the first 32 elements of the root array.
- static const int kRootArrayNumberOfConstantEncodings = 0x20;
- static int RootArrayConstantFromByteCode(int byte_code) {
- return byte_code & 0x1f;
- }
+ static const int kWhereToPointMask = 0x40;
+ // ---------- Misc ----------
+ // Skip.
+ static const int kSkip = 0x1d;
+ // Internal reference encoded as offsets of pc and target from code entry.
+ static const int kInternalReference = 0x1e;
+ static const int kInternalReferenceEncoded = 0x1f;
// Do nothing, used for padding.
- static const int kNop = 0xf;
-
+ static const int kNop = 0x3d;
// Move to next reserved chunk.
- static const int kNextChunk = 0x4f;
-
+ static const int kNextChunk = 0x3e;
// A tag emitted at strategic points in the snapshot to delineate sections.
// If the deserializer does not find these at the expected moments then it
// is an indication that the snapshot and the VM do not fit together.
// Examine the build process for architecture, version or configuration
// mismatches.
- static const int kSynchronize = 0x8f;
-
+ static const int kSynchronize = 0x5d;
// Used for the source code of the natives, which is in the executable, but
// is referred to from external strings in the snapshot.
- static const int kNativesStringResource = 0xcf;
-
+ static const int kNativesStringResource = 0x5e;
+ // Raw data of variable length.
+ static const int kVariableRawData = 0x7d;
+ // Repeats of variable length.
+ static const int kVariableRepeat = 0x7e;
+
+ // ---------- byte code range 0x80..0xff ----------
+ // First 32 root array items.
+ static const int kNumberOfRootArrayConstants = 0x20;
+ // 0x80..0x9f
+ static const int kRootArrayConstants = 0x80;
+ // 0xa0..0xbf
+ static const int kRootArrayConstantsWithSkip = 0xa0;
+ static const int kRootArrayConstantsMask = 0x1f;
+
+ // 8 hot (recently seen or back-referenced) objects with optional skip.
+ static const int kNumberOfHotObjects = 0x08;
+ // 0xc0..0xc7
+ static const int kHotObject = 0xc0;
+ // 0xc8..0xcf
+ static const int kHotObjectWithSkip = 0xc8;
+ static const int kHotObjectMask = 0x07;
+
+ // 32 common raw data lengths.
+ static const int kNumberOfFixedRawData = 0x20;
+ // 0xd0..0xef
+ static const int kFixedRawData = 0xd0;
+ static const int kOnePointerRawData = kFixedRawData;
+ static const int kFixedRawDataStart = kFixedRawData - 1;
+
+ // 16 repeat lengths.
+ static const int kNumberOfFixedRepeat = 0x10;
+ // 0xf0..0xff
+ static const int kFixedRepeat = 0xf0;
+ static const int kFixedRepeatStart = kFixedRepeat - 1;
+
+ // ---------- special values ----------
static const int kAnyOldSpace = -1;
- // A bitmask for getting the space out of an instruction.
- static const int kSpaceMask = 7;
- STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
-
// Sentinel after a new object to indicate that double alignment is needed.
static const int kDoubleAlignmentSentinel = 0;
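The reorganized opcode space above groups bytecodes into power-of-two ranges, so a single AND both classifies a byte and extracts its payload. A standalone sketch of that decode, not part of the patch (the constants are copied from this header; the with-skip ranges and remaining classes are elided):

#include <cstdio>

// Constants copied from the header above; other ranges omitted.
const int kRootArrayConstants = 0x80;      // 0x80..0x9f
const int kRootArrayConstantsMask = 0x1f;
const int kHotObject = 0xc0;               // 0xc0..0xc7
const int kHotObjectMask = 0x07;

int main() {
  int byte_code = 0xc5;
  if ((byte_code & ~kRootArrayConstantsMask) == kRootArrayConstants) {
    printf("root array constant, index %d\n",
           byte_code & kRootArrayConstantsMask);
  } else if ((byte_code & ~kHotObjectMask) == kHotObject) {
    printf("hot object, index %d\n", byte_code & kHotObjectMask);  // index 5
  }
  return 0;
}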
@@ -469,6 +422,7 @@ class SerializerDeserializer: public ObjectVisitor {
// Used as index for the attached reference representing the global proxy.
static const int kGlobalProxyReference = 0;
+ // ---------- member variable ----------
HotObjectsList hot_objects_;
};
@@ -497,9 +451,16 @@ class SerializedData {
if (owns_data_) DeleteArray<byte>(data_);
}
+ uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }
+
class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
class IsLastChunkBits : public BitField<bool, 31, 1> {};
+ static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
+ uint32_t external_refs = table->size();
+ return 0xC0DE0000 ^ external_refs;
+ }
+
protected:
void SetHeaderValue(int offset, uint32_t value) {
uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
@@ -514,6 +475,16 @@ class SerializedData {
void AllocateData(int size);
+ static uint32_t ComputeMagicNumber(Isolate* isolate) {
+ return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
+ }
+
+ void SetMagicNumber(Isolate* isolate) {
+ SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
+ }
+
+ static const int kMagicNumberOffset = 0;
+
byte* data_;
int size_;
bool owns_data_;
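GetMagicNumber/ComputeMagicNumber turn the size of the external reference table into a fingerprint: XOR-ing the count into 0xC0DE0000 means a snapshot produced against a different set of external references fails the comparison at load time. A minimal standalone illustration, not part of the patch (the counts are made up; V8 takes them from ExternalReferenceTable::size()):

#include <cassert>
#include <cstdint>

uint32_t ComputeMagicNumber(uint32_t external_refs) {
  return 0xC0DE0000 ^ external_refs;  // same scheme as the header above
}

int main() {
  uint32_t serialized = ComputeMagicNumber(842);    // count at build time
  assert(serialized == ComputeMagicNumber(842));    // same table: accepted
  assert(serialized != ComputeMagicNumber(843));    // table drifted: rejected
  return 0;
}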
@@ -528,7 +499,8 @@ class Deserializer: public SerializerDeserializer {
explicit Deserializer(Data* data)
: isolate_(NULL),
source_(data->Payload()),
- external_reference_decoder_(NULL),
+ magic_number_(data->GetMagicNumber()),
+ external_reference_table_(NULL),
deserialized_large_objects_(0),
deserializing_user_code_(false) {
DecodeReservation(data->Reservations());
@@ -602,6 +574,8 @@ class Deserializer: public SerializerDeserializer {
Vector<Handle<Object> > attached_objects_;
SnapshotByteSource source_;
+ uint32_t magic_number_;
+
// The address of the next object that will be allocated in each space.
// Each space has a number of chunks reserved by the GC, with each chunk
// fitting into a page. Deserialized objects are allocated into the
@@ -610,7 +584,7 @@ class Deserializer: public SerializerDeserializer {
uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
Address high_water_[kNumberOfPreallocatedSpaces];
- ExternalReferenceDecoder* external_reference_decoder_;
+ ExternalReferenceTable* external_reference_table_;
List<HeapObject*> deserialized_large_objects_;
@@ -639,23 +613,21 @@ class Serializer : public SerializerDeserializer {
protected:
class ObjectSerializer : public ObjectVisitor {
public:
- ObjectSerializer(Serializer* serializer,
- Object* o,
- SnapshotByteSink* sink,
- HowToCode how_to_code,
- WhereToPoint where_to_point)
- : serializer_(serializer),
- object_(HeapObject::cast(o)),
- sink_(sink),
- reference_representation_(how_to_code + where_to_point),
- bytes_processed_so_far_(0),
- code_object_(o->IsCode()),
- code_has_been_output_(false) { }
+ ObjectSerializer(Serializer* serializer, Object* o, SnapshotByteSink* sink,
+ HowToCode how_to_code, WhereToPoint where_to_point)
+ : serializer_(serializer),
+ object_(HeapObject::cast(o)),
+ sink_(sink),
+ reference_representation_(how_to_code + where_to_point),
+ bytes_processed_so_far_(0),
+ is_code_object_(o->IsCode()),
+ code_has_been_output_(false) {}
void Serialize();
void VisitPointers(Object** start, Object** end);
void VisitEmbeddedPointer(RelocInfo* target);
void VisitExternalReference(Address* p);
void VisitExternalReference(RelocInfo* rinfo);
+ void VisitInternalReference(RelocInfo* rinfo);
void VisitCodeTarget(RelocInfo* target);
void VisitCodeEntry(Address entry_address);
void VisitCell(RelocInfo* rinfo);
@@ -681,12 +653,14 @@ class Serializer : public SerializerDeserializer {
// External strings are serialized in a way to resemble sequential strings.
void SerializeExternalString();
+ Address PrepareCode();
+
Serializer* serializer_;
HeapObject* object_;
SnapshotByteSink* sink_;
int reference_representation_;
int bytes_processed_so_far_;
- bool code_object_;
+ bool is_code_object_;
bool code_has_been_output_;
};
@@ -710,11 +684,10 @@ class Serializer : public SerializerDeserializer {
bool BackReferenceIsAlreadyAllocated(BackReference back_reference);
// This will return the space for an object.
- static AllocationSpace SpaceOfObject(HeapObject* object);
BackReference AllocateLargeObject(int size);
BackReference Allocate(AllocationSpace space, int size);
int EncodeExternalReference(Address addr) {
- return external_reference_encoder_->Encode(addr);
+ return external_reference_encoder_.Encode(addr);
}
// GetInt reads 4 bytes at once, requiring padding at the end.
@@ -728,6 +701,8 @@ class Serializer : public SerializerDeserializer {
// of the serializer. Initialize it on demand.
void InitializeCodeAddressMap();
+ Code* CopyCode(Code* code);
+
inline uint32_t max_chunk_size(int space) const {
DCHECK_LE(0, space);
DCHECK_LT(space, kNumberOfSpaces);
@@ -739,7 +714,7 @@ class Serializer : public SerializerDeserializer {
Isolate* isolate_;
SnapshotByteSink* sink_;
- ExternalReferenceEncoder* external_reference_encoder_;
+ ExternalReferenceEncoder external_reference_encoder_;
BackReferenceMap back_reference_map_;
RootIndexMap root_index_map_;
@@ -762,6 +737,8 @@ class Serializer : public SerializerDeserializer {
uint32_t large_objects_total_size_;
uint32_t seen_large_objects_index_;
+ List<byte> code_buffer_;
+
DISALLOW_COPY_AND_ASSIGN(Serializer);
};
@@ -802,6 +779,7 @@ class PartialSerializer : public Serializer {
Serializer* startup_serializer_;
List<BackReference> outdated_contexts_;
Object* global_object_;
+ PartialCacheIndexMap partial_cache_index_map_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
@@ -814,7 +792,7 @@ class StartupSerializer : public Serializer {
// strong roots have been serialized we can create a partial snapshot
// which will repopulate the cache with objects needed by that partial
// snapshot.
- isolate->set_serialize_partial_snapshot_cache_length(0);
+ isolate->partial_snapshot_cache()->Clear();
InitializeCodeAddressMap();
}
@@ -917,13 +895,15 @@ class SnapshotData : public SerializedData {
private:
bool IsSane();
+
// The data header consists of uint32_t-sized entries:
- // [0] version hash
- // [1] number of reservation size entries
- // [2] payload length
+ // [0] magic number and external reference count
+ // [1] version hash
+ // [2] number of reservation size entries
+ // [3] payload length
// ... reservations
// ... serialized payload
- static const int kCheckSumOffset = 0;
+ static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
@@ -934,7 +914,8 @@ class SnapshotData : public SerializedData {
class SerializedCodeData : public SerializedData {
public:
// Used when consuming.
- static SerializedCodeData* FromCachedData(ScriptData* cached_data,
+ static SerializedCodeData* FromCachedData(Isolate* isolate,
+ ScriptData* cached_data,
String* source);
// Used when producing.
@@ -959,17 +940,15 @@ class SerializedCodeData : public SerializedData {
SOURCE_MISMATCH = 3,
CPU_FEATURES_MISMATCH = 4,
FLAGS_MISMATCH = 5,
- CHECKSUM_MISMATCH = 6,
+ CHECKSUM_MISMATCH = 6
};
- SanityCheckResult SanityCheck(String* source) const;
+ SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;
uint32_t SourceHash(String* source) const { return source->length(); }
- static const uint32_t kMagicNumber = 0xC0D1F1ED;
-
// The data header consists of uint32_t-sized entries:
- // [ 0] magic number
+ // [ 0] magic number and external reference count
// [ 1] version hash
// [ 2] source hash
// [ 3] cpu features
@@ -983,7 +962,6 @@ class SerializedCodeData : public SerializedData {
// ... reservations
// ... code stub keys
// ... serialized payload
- static const int kMagicNumberOffset = 0;
static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
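Both headers use the same chained-offset convention: every field is one uint32_t and each offset is defined in terms of its predecessor, so inserting a field (as the magic number insertion above does) automatically shifts everything behind it. A standalone sketch of the pattern, not part of the patch:

#include <cstdint>
#include <cstdio>
#include <cstring>

const int kInt32Size = 4;
const int kMagicNumberOffset = 0;
const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;  // 4
const int kSourceHashOffset = kVersionHashOffset + kInt32Size;   // 8

uint32_t GetHeaderValue(const uint8_t* data, int offset) {
  uint32_t value;
  memcpy(&value, data + offset, sizeof(value));  // entries are uint32_t-sized
  return value;
}

int main() {
  uint8_t header[kSourceHashOffset + kInt32Size] = {0};
  uint32_t hash = 0x0BADC0DE;
  memcpy(header + kVersionHashOffset, &hash, sizeof(hash));
  printf("0x%X\n", GetHeaderValue(header, kVersionHashOffset));
  return 0;
}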
diff --git a/deps/v8/src/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 637bac0f4d..6d760b5ed9 100644
--- a/deps/v8/src/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -9,16 +9,11 @@
#include "src/api.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen.h"
-#include "src/snapshot.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
-bool Snapshot::HaveASnapshotToStartFrom() {
- return SnapshotBlob().data != NULL;
-}
-
-
#ifdef DEBUG
bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
return !Snapshot::ExtractStartupData(snapshot_blob).is_empty() &&
@@ -27,32 +22,31 @@ bool Snapshot::SnapshotIsValid(v8::StartupData* snapshot_blob) {
#endif // DEBUG
-bool Snapshot::EmbedsScript() {
- if (!HaveASnapshotToStartFrom()) return false;
- const v8::StartupData blob = SnapshotBlob();
- return ExtractMetadata(&blob).embeds_script();
+bool Snapshot::EmbedsScript(Isolate* isolate) {
+ if (!isolate->snapshot_available()) return false;
+ return ExtractMetadata(isolate->snapshot_blob()).embeds_script();
}
-uint32_t Snapshot::SizeOfFirstPage(AllocationSpace space) {
+uint32_t Snapshot::SizeOfFirstPage(Isolate* isolate, AllocationSpace space) {
DCHECK(space >= FIRST_PAGED_SPACE && space <= LAST_PAGED_SPACE);
- if (!HaveASnapshotToStartFrom()) {
+ if (!isolate->snapshot_available()) {
return static_cast<uint32_t>(MemoryAllocator::PageAreaSize(space));
}
uint32_t size;
int offset = kFirstPageSizesOffset + (space - FIRST_PAGED_SPACE) * kInt32Size;
- memcpy(&size, SnapshotBlob().data + offset, kInt32Size);
+ memcpy(&size, isolate->snapshot_blob()->data + offset, kInt32Size);
return size;
}
bool Snapshot::Initialize(Isolate* isolate) {
- if (!HaveASnapshotToStartFrom()) return false;
+ if (!isolate->snapshot_available()) return false;
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
- const v8::StartupData blob = SnapshotBlob();
- Vector<const byte> startup_data = ExtractStartupData(&blob);
+ const v8::StartupData* blob = isolate->snapshot_blob();
+ Vector<const byte> startup_data = ExtractStartupData(blob);
SnapshotData snapshot_data(startup_data);
Deserializer deserializer(&snapshot_data);
bool success = isolate->Init(&deserializer);
@@ -68,12 +62,12 @@ bool Snapshot::Initialize(Isolate* isolate) {
MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
Handle<FixedArray>* outdated_contexts_out) {
- if (!HaveASnapshotToStartFrom()) return Handle<Context>();
+ if (!isolate->snapshot_available()) return Handle<Context>();
base::ElapsedTimer timer;
if (FLAG_profile_deserialization) timer.Start();
- const v8::StartupData blob = SnapshotBlob();
- Vector<const byte> context_data = ExtractContextData(&blob);
+ const v8::StartupData* blob = isolate->snapshot_blob();
+ Vector<const byte> context_data = ExtractContextData(blob);
SnapshotData snapshot_data(context_data);
Deserializer deserializer(&snapshot_data);
@@ -84,7 +78,7 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
CHECK(result->IsContext());
// If the snapshot does not contain a custom script, we need to update
// the global object for exactly one context.
- CHECK(EmbedsScript() || (*outdated_contexts_out)->length() == 1);
+ CHECK(EmbedsScript(isolate) || (*outdated_contexts_out)->length() == 1);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
int bytes = context_data.length();
@@ -104,6 +98,23 @@ void CalculateFirstPageSizes(bool is_default_snapshot,
context_snapshot.Reservations();
int startup_index = 0;
int context_index = 0;
+
+ if (FLAG_profile_deserialization) {
+ int startup_total = 0;
+ int context_total = 0;
+ for (auto& reservation : startup_reservations) {
+ startup_total += reservation.chunk_size();
+ }
+ for (auto& reservation : context_reservations) {
+ context_total += reservation.chunk_size();
+ }
+ PrintF(
+ "Deserialization will reserve:\n"
+ "%10d bytes for startup\n"
+ "%10d bytes per context\n",
+ startup_total, context_total);
+ }
+
for (int space = 0; space < i::Serializer::kNumberOfSpaces; space++) {
bool single_chunk = true;
while (!startup_reservations[startup_index].is_last()) {
@@ -124,6 +135,8 @@ void CalculateFirstPageSizes(bool is_default_snapshot,
required = (startup_reservations[startup_index].chunk_size() +
2 * context_reservations[context_index].chunk_size()) +
Page::kObjectStartOffset;
+ // Add a small allowance to the code space for small scripts.
+ if (space == CODE_SPACE) required += 32 * KB;
} else {
      // We expect the vanilla snapshot to require only one page per space.
DCHECK(!is_default_snapshot);
@@ -155,7 +168,7 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
uint32_t first_page_sizes[kNumPagedSpaces];
- CalculateFirstPageSizes(metadata.embeds_script(), startup_snapshot,
+ CalculateFirstPageSizes(!metadata.embeds_script(), startup_snapshot,
context_snapshot, first_page_sizes);
int startup_length = startup_data.length();
@@ -172,6 +185,14 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
memcpy(data + kStartupDataOffset, startup_data.begin(), startup_length);
memcpy(data + context_offset, context_data.begin(), context_length);
v8::StartupData result = {data, length};
+
+ if (FLAG_profile_deserialization) {
+ PrintF(
+ "Snapshot blob consists of:\n"
+ "%10d bytes for startup\n"
+ "%10d bytes for context\n",
+ startup_length, context_length);
+ }
return result;
}
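CreateSnapshotBlob above lays the two payloads out back to back behind a fixed header. A rough standalone sketch of that assembly, not part of the patch (the header here is a single length field and both payloads are assumed non-empty; V8's real header also carries the per-space first page sizes):

#include <cstring>
#include <vector>

const int kStartupDataOffset = 4;  // stand-in for V8's actual header size

std::vector<char> CreateBlob(const std::vector<char>& startup,
                             const std::vector<char>& context) {
  int startup_length = static_cast<int>(startup.size());
  int context_offset = kStartupDataOffset + startup_length;
  std::vector<char> data(context_offset + context.size());
  memcpy(data.data(), &startup_length, sizeof(startup_length));
  memcpy(data.data() + kStartupDataOffset, startup.data(), startup.size());
  memcpy(data.data() + context_offset, context.data(), context.size());
  return data;
}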
diff --git a/deps/v8/src/snapshot-empty.cc b/deps/v8/src/snapshot/snapshot-empty.cc
index 020d1cb812..0eea940100 100644
--- a/deps/v8/src/snapshot-empty.cc
+++ b/deps/v8/src/snapshot/snapshot-empty.cc
@@ -6,7 +6,7 @@
#include "src/v8.h"
-#include "src/snapshot.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -19,10 +19,10 @@ namespace internal {
// below. This happens when compiling the mksnapshot utility.
void SetNativesFromFile(StartupData* data) { CHECK(false); }
void SetSnapshotFromFile(StartupData* data) { CHECK(false); }
+void ReadNatives() {}
+void DisposeNatives() {}
#endif // V8_USE_EXTERNAL_STARTUP_DATA
-const v8::StartupData Snapshot::SnapshotBlob() {
- return {NULL, 0};
-}
+const v8::StartupData* Snapshot::DefaultSnapshotBlob() { return NULL; }
} } // namespace v8::internal
diff --git a/deps/v8/src/snapshot-external.cc b/deps/v8/src/snapshot/snapshot-external.cc
index a9a5df1cd7..f5e3de49cd 100644
--- a/deps/v8/src/snapshot-external.cc
+++ b/deps/v8/src/snapshot/snapshot-external.cc
@@ -4,11 +4,11 @@
// Used for building with external snapshots.
-#include "src/snapshot.h"
+#include "src/snapshot/snapshot.h"
#include "src/base/platform/mutex.h"
-#include "src/serialize.h"
-#include "src/snapshot-source-sink.h"
+#include "src/snapshot/serialize.h"
+#include "src/snapshot/snapshot-source-sink.h"
#include "src/v8.h" // for V8::Initialize
@@ -35,9 +35,9 @@ void SetSnapshotFromFile(StartupData* snapshot_blob) {
}
-const v8::StartupData Snapshot::SnapshotBlob() {
+const v8::StartupData* Snapshot::DefaultSnapshotBlob() {
base::LockGuard<base::Mutex> lock_guard(
external_startup_data_mutex.Pointer());
- return external_startup_blob;
+ return &external_startup_blob;
}
} } // namespace v8::internal
diff --git a/deps/v8/src/snapshot-source-sink.cc b/deps/v8/src/snapshot/snapshot-source-sink.cc
index fd94724340..c0179b7fca 100644
--- a/deps/v8/src/snapshot-source-sink.cc
+++ b/deps/v8/src/snapshot/snapshot-source-sink.cc
@@ -3,28 +3,18 @@
// found in the LICENSE file.
-#include "src/snapshot-source-sink.h"
+#include "src/snapshot/snapshot-source-sink.h"
#include "src/base/logging.h"
#include "src/handles-inl.h"
-#include "src/serialize.h" // for SerializerDeserializer::nop() in AtEOF()
+#include "src/snapshot/serialize.h" // for SerializerDeserializer::nop()
namespace v8 {
namespace internal {
-int32_t SnapshotByteSource::GetUnalignedInt() {
- DCHECK(position_ < length_); // Require at least one byte left.
- int32_t answer = data_[position_];
- answer |= data_[position_ + 1] << 8;
- answer |= data_[position_ + 2] << 16;
- answer |= data_[position_ + 3] << 24;
- return answer;
-}
-
-
void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
- MemCopy(to, data_ + position_, number_of_bytes);
+ memcpy(to, data_ + position_, number_of_bytes);
position_ += number_of_bytes;
}
diff --git a/deps/v8/src/snapshot-source-sink.h b/deps/v8/src/snapshot/snapshot-source-sink.h
index 66feaecca4..6612029a90 100644
--- a/deps/v8/src/snapshot-source-sink.h
+++ b/deps/v8/src/snapshot/snapshot-source-sink.h
@@ -36,16 +36,18 @@ class SnapshotByteSource FINAL {
return data_[position_++];
}
- int32_t GetUnalignedInt();
-
void Advance(int by) { position_ += by; }
void CopyRaw(byte* to, int number_of_bytes);
inline int GetInt() {
- // This way of variable-length encoding integers does not suffer from branch
- // mispredictions.
- uint32_t answer = GetUnalignedInt();
+ // This way of decoding variable-length encoded integers does not
+ // suffer from branch mispredictions.
+ DCHECK(position_ + 3 < length_);
+ uint32_t answer = data_[position_];
+ answer |= data_[position_ + 1] << 8;
+ answer |= data_[position_ + 2] << 16;
+ answer |= data_[position_ + 3] << 24;
int bytes = (answer & 3) + 1;
Advance(bytes);
uint32_t mask = 0xffffffffu;
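GetInt's trick is to always load four bytes and repair afterwards, rather than branch per byte: the low two bits of the first byte carry (length - 1), and a computed mask discards whatever was over-read. The hunk ends mid-function here, so the masking tail in this standalone sketch is reconstructed and should be read as an assumption:

#include <cstdint>
#include <cstdio>

// Decode one variable-length integer; low 2 bits of byte 0 = length - 1.
uint32_t GetInt(const uint8_t* data, int* position) {
  uint32_t answer = data[*position];
  answer |= static_cast<uint32_t>(data[*position + 1]) << 8;
  answer |= static_cast<uint32_t>(data[*position + 2]) << 16;
  answer |= static_cast<uint32_t>(data[*position + 3]) << 24;
  int bytes = (answer & 3) + 1;
  *position += bytes;
  uint32_t mask = 0xffffffffu;
  mask >>= 32 - (bytes << 3);  // reconstructed: drop the over-read bytes
  answer &= mask;
  return answer >> 2;          // reconstructed: strip the length tag
}

int main() {
  uint8_t buf[] = {0xa8, 0xff, 0xff, 0xff};  // (42 << 2) | 0, then padding
  int pos = 0;
  printf("%u at %d\n", GetInt(buf, &pos), pos);  // prints "42 at 1"
  return 0;
}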
diff --git a/deps/v8/src/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 3135756a32..d2eaaa2d81 100644
--- a/deps/v8/src/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -3,7 +3,7 @@
// found in the LICENSE file.
#include "src/isolate.h"
-#include "src/serialize.h"
+#include "src/snapshot/serialize.h"
#ifndef V8_SNAPSHOT_H_
#define V8_SNAPSHOT_H_
@@ -36,14 +36,18 @@ class Snapshot : public AllStatic {
Isolate* isolate, Handle<JSGlobalProxy> global_proxy,
Handle<FixedArray>* outdated_contexts_out);
- static bool HaveASnapshotToStartFrom();
+ static bool HaveASnapshotToStartFrom(Isolate* isolate) {
+ // Do not use snapshots if the isolate is used to create snapshots.
+ return isolate->snapshot_blob() != NULL;
+ }
+
+ static bool EmbedsScript(Isolate* isolate);
- static bool EmbedsScript();
+ static uint32_t SizeOfFirstPage(Isolate* isolate, AllocationSpace space);
- static uint32_t SizeOfFirstPage(AllocationSpace space);
// To be implemented by the snapshot source.
- static const v8::StartupData SnapshotBlob();
+ static const v8::StartupData* DefaultSnapshotBlob();
static v8::StartupData CreateSnapshotBlob(
const StartupSerializer& startup_ser,
diff --git a/deps/v8/src/string-iterator.js b/deps/v8/src/string-iterator.js
index f5eef37716..df31c130b8 100644
--- a/deps/v8/src/string-iterator.js
+++ b/deps/v8/src/string-iterator.js
@@ -2,13 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+(function() {
+
"use strict";
+%CheckIsBootstrapping();
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// var $String = global.String;
+var GlobalArray = global.Array;
+var GlobalObject = global.Object;
+var GlobalString = global.String;
+//-------------------------------------------------------------------
var stringIteratorIteratedStringSymbol =
GLOBAL_PRIVATE("StringIterator#iteratedString");
@@ -75,35 +79,27 @@ function StringIteratorNext() {
}
-function SetUpStringIterator() {
- %CheckIsBootstrapping();
-
- %FunctionSetPrototype(StringIterator, new $Object());
- %FunctionSetInstanceClassName(StringIterator, 'String Iterator');
-
- InstallFunctions(StringIterator.prototype, DONT_ENUM, $Array(
- 'next', StringIteratorNext
- ));
- %FunctionSetName(StringIteratorIterator, '[Symbol.iterator]');
- %AddNamedProperty(StringIterator.prototype, symbolIterator,
- StringIteratorIterator, DONT_ENUM);
- %AddNamedProperty(StringIterator.prototype, symbolToStringTag,
- "String Iterator", READ_ONLY | DONT_ENUM);
-}
-SetUpStringIterator();
-
-
// 21.1.3.27 String.prototype [ @@iterator ]( )
function StringPrototypeIterator() {
return CreateStringIterator(this);
}
+//-------------------------------------------------------------------
-function ExtendStringPrototypeWithIterator() {
- %CheckIsBootstrapping();
+%FunctionSetPrototype(StringIterator, new GlobalObject());
+%FunctionSetInstanceClassName(StringIterator, 'String Iterator');
- %FunctionSetName(StringPrototypeIterator, '[Symbol.iterator]');
- %AddNamedProperty($String.prototype, symbolIterator,
- StringPrototypeIterator, DONT_ENUM);
-}
-ExtendStringPrototypeWithIterator();
+InstallFunctions(StringIterator.prototype, DONT_ENUM, GlobalArray(
+ 'next', StringIteratorNext
+));
+%FunctionSetName(StringIteratorIterator, '[Symbol.iterator]');
+%AddNamedProperty(StringIterator.prototype, symbolIterator,
+ StringIteratorIterator, DONT_ENUM);
+%AddNamedProperty(StringIterator.prototype, symbolToStringTag,
+ "String Iterator", READ_ONLY | DONT_ENUM);
+
+%FunctionSetName(StringPrototypeIterator, '[Symbol.iterator]');
+%AddNamedProperty(GlobalString.prototype, symbolIterator,
+ StringPrototypeIterator, DONT_ENUM);
+
+})();
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index ac5cb7f99e..0f29cd1dc1 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -2,11 +2,19 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// var $String = global.String;
+var $stringCharAt;
+var $stringIndexOf;
+var $stringSubstring;
-// -------------------------------------------------------------------
+(function() {
+
+%CheckIsBootstrapping();
+
+var GlobalArray = global.Array;
+var GlobalRegExp = global.RegExp;
+var GlobalString = global.String;
+
+//-------------------------------------------------------------------
function StringConstructor(x) {
if (%_ArgumentsLength() == 0) x = '';
@@ -14,7 +22,7 @@ function StringConstructor(x) {
%_SetValueOf(this, TO_STRING_INLINE(x));
} else {
return IS_SYMBOL(x) ?
- %_CallFunction(x, SymbolToString) : TO_STRING_INLINE(x);
+ %_CallFunction(x, $symbolToString) : TO_STRING_INLINE(x);
}
}
@@ -38,7 +46,7 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
-function StringCharAt(pos) {
+function StringCharAtJS(pos) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.charAt");
var result = %_StringCharAt(this, pos);
@@ -50,7 +58,7 @@ function StringCharAt(pos) {
// ECMA-262 section 15.5.4.5
-function StringCharCodeAt(pos) {
+function StringCharCodeAtJS(pos) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.charCodeAt");
var result = %_StringCharCodeAt(this, pos);
@@ -64,11 +72,10 @@ function StringCharCodeAt(pos) {
// ECMA-262, section 15.5.4.6
function StringConcat(other /* and more */) { // length == 1
CHECK_OBJECT_COERCIBLE(this, "String.prototype.concat");
-
var len = %_ArgumentsLength();
var this_as_string = TO_STRING_INLINE(this);
if (len === 1) {
- return this_as_string + other;
+ return this_as_string + TO_STRING_INLINE(other);
}
var parts = new InternalArray(len + 1);
parts[0] = this_as_string;
@@ -147,16 +154,15 @@ function StringMatchJS(regexp) {
// value is discarded.
var lastIndex = regexp.lastIndex;
TO_INTEGER_FOR_SIDE_EFFECT(lastIndex);
- if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
- // lastMatchInfo is defined in regexp.js.
- var result = %StringMatch(subject, regexp, lastMatchInfo);
- if (result !== null) lastMatchInfoOverride = null;
+ if (!regexp.global) return $regexpExecNoTests(regexp, subject, 0);
+ var result = %StringMatch(subject, regexp, $regexpLastMatchInfo);
+ if (result !== null) $regexpLastMatchInfoOverride = null;
regexp.lastIndex = 0;
return result;
}
// Non-regexp argument.
- regexp = new $RegExp(regexp);
- return RegExpExecNoTests(regexp, subject, 0);
+ regexp = new GlobalRegExp(regexp);
+ return $regexpExecNoTests(regexp, subject, 0);
}
@@ -182,9 +188,9 @@ function StringNormalizeJS(form) {
}
-// This has the same size as the lastMatchInfo array, and can be used for
-// functions that expect that structure to be returned. It is used when the
-// needle is a string rather than a regexp. In this case we can't update
+// This has the same size as the $regexpLastMatchInfo array, and can be used
+// for functions that expect that structure to be returned. It is used when
+// the needle is a string rather than a regexp. In this case we can't update
// lastMatchArray without erroneously affecting the properties on the global
// RegExp object.
var reusableMatchInfo = [2, "", "", -1, -1];
@@ -224,7 +230,7 @@ function StringReplace(search, replace) {
if (!search.global) {
// Non-global regexp search, string replace.
- var match = DoRegExpExec(search, subject, 0);
+ var match = $regexpExec(search, subject, 0);
if (match == null) {
search.lastIndex = 0
return subject;
@@ -233,28 +239,28 @@ function StringReplace(search, replace) {
return %_SubString(subject, 0, match[CAPTURE0]) +
%_SubString(subject, match[CAPTURE1], subject.length)
}
- return ExpandReplacement(replace, subject, lastMatchInfo,
+ return ExpandReplacement(replace, subject, $regexpLastMatchInfo,
%_SubString(subject, 0, match[CAPTURE0])) +
%_SubString(subject, match[CAPTURE1], subject.length);
}
// Global regexp search, string replace.
search.lastIndex = 0;
- if (lastMatchInfoOverride == null) {
+ if ($regexpLastMatchInfoOverride == null) {
return %StringReplaceGlobalRegExpWithString(
- subject, search, replace, lastMatchInfo);
+ subject, search, replace, $regexpLastMatchInfo);
} else {
// We use this hack to detect whether StringReplaceRegExpWithString
// found at least one hit. In that case we need to remove any
// override.
- var saved_subject = lastMatchInfo[LAST_SUBJECT_INDEX];
- lastMatchInfo[LAST_SUBJECT_INDEX] = 0;
+ var saved_subject = $regexpLastMatchInfo[LAST_SUBJECT_INDEX];
+ $regexpLastMatchInfo[LAST_SUBJECT_INDEX] = 0;
var answer = %StringReplaceGlobalRegExpWithString(
- subject, search, replace, lastMatchInfo);
- if (%_IsSmi(lastMatchInfo[LAST_SUBJECT_INDEX])) {
- lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
+ subject, search, replace, $regexpLastMatchInfo);
+ if (%_IsSmi($regexpLastMatchInfo[LAST_SUBJECT_INDEX])) {
+ $regexpLastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
} else {
- lastMatchInfoOverride = null;
+ $regexpLastMatchInfoOverride = null;
}
return answer;
}
@@ -418,7 +424,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
}
var res = %RegExpExecMultiple(regexp,
subject,
- lastMatchInfo,
+ $regexpLastMatchInfo,
resultArray);
regexp.lastIndex = 0;
if (IS_NULL(res)) {
@@ -427,7 +433,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
return subject;
}
var len = res.length;
- if (NUMBER_OF_CAPTURES(lastMatchInfo) == 2) {
+ if (NUMBER_OF_CAPTURES($regexpLastMatchInfo) == 2) {
// If the number of captures is two then there are no explicit captures in
// the regexp, just the implicit capture that captures the whole match. In
// this case we can simplify quite a bit and end up with something faster.
@@ -451,7 +457,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
} else {
override[0] = elem;
override[1] = match_start;
- lastMatchInfoOverride = override;
+ $regexpLastMatchInfoOverride = override;
var func_result =
%_CallFunction(receiver, elem, match_start, subject, replace);
// Overwrite the i'th element in the results with the string we got
@@ -467,7 +473,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
if (!%_IsSmi(elem)) {
// elem must be an Array.
// Use the apply argument as backing for global RegExp properties.
- lastMatchInfoOverride = elem;
+ $regexpLastMatchInfoOverride = elem;
var func_result = %Apply(replace, receiver, elem, 0, elem.length);
// Overwrite the i'th element in the results with the string we got
// back from the callback function.
@@ -483,7 +489,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
- var matchInfo = DoRegExpExec(regexp, subject, 0);
+ var matchInfo = $regexpExec(regexp, subject, 0);
if (IS_NULL(matchInfo)) {
regexp.lastIndex = 0;
return subject;
@@ -530,9 +536,9 @@ function StringSearch(re) {
} else if (IS_REGEXP(re)) {
regexp = re;
} else {
- regexp = new $RegExp(re);
+ regexp = new GlobalRegExp(re);
}
- var match = DoRegExpExec(regexp, TO_STRING_INLINE(this), 0);
+ var match = $regexpExec(regexp, TO_STRING_INLINE(this), 0);
if (match) {
return match[CAPTURE0];
}
@@ -618,7 +624,7 @@ function StringSplitJS(separator, limit) {
function StringSplitOnRegExp(subject, separator, limit, length) {
if (length === 0) {
- if (DoRegExpExec(separator, subject, 0, 0) != null) {
+ if ($regexpExec(separator, subject, 0, 0) != null) {
return [];
}
return [subject];
@@ -637,7 +643,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) {
break;
}
- var matchInfo = DoRegExpExec(separator, subject, startIndex);
+ var matchInfo = $regexpExec(separator, subject, startIndex);
if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) {
result[result.length] = %_SubString(subject, currentIndex, length);
break;
@@ -924,62 +930,255 @@ function StringSup() {
return "<sup>" + this + "</sup>";
}
+// ES6 draft 01-20-14, section 21.1.3.13
+function StringRepeat(count) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.repeat");
+
+ var s = TO_STRING_INLINE(this);
+ var n = ToInteger(count);
+ // The maximum string length is stored in a smi, so a longer repeat
+ // must result in a range error.
+ if (n < 0 || n > %_MaxSmi()) {
+ throw MakeRangeError("invalid_count_value", []);
+ }
+
+ var r = "";
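+  // Square-and-multiply over concatenation: s doubles on every pass and is
+  // appended whenever the current low bit of n is set, so the loop runs
+  // O(log n) times rather than n.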
+ while (true) {
+ if (n & 1) r += s;
+ n >>= 1;
+ if (n === 0) return r;
+ s += s;
+ }
+}
+
+
+// ES6 draft 04-05-14, section 21.1.3.18
+function StringStartsWith(searchString /* position */) { // length == 1
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.startsWith");
+
+ var s = TO_STRING_INLINE(this);
+
+ if (IS_REGEXP(searchString)) {
+ throw MakeTypeError("first_argument_not_regexp",
+ ["String.prototype.startsWith"]);
+ }
+
+ var ss = TO_STRING_INLINE(searchString);
+ var pos = 0;
+ if (%_ArgumentsLength() > 1) {
+ pos = %_Arguments(1); // position
+ pos = ToInteger(pos);
+ }
+
+ var s_len = s.length;
+ var start = $min($max(pos, 0), s_len);
+ var ss_len = ss.length;
+ if (ss_len + start > s_len) {
+ return false;
+ }
+
+ return %StringIndexOf(s, ss, start) === start;
+}
+
+
+// ES6 draft 04-05-14, section 21.1.3.7
+function StringEndsWith(searchString /* position */) { // length == 1
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.endsWith");
+
+ var s = TO_STRING_INLINE(this);
+
+ if (IS_REGEXP(searchString)) {
+ throw MakeTypeError("first_argument_not_regexp",
+ ["String.prototype.endsWith"]);
+ }
+
+ var ss = TO_STRING_INLINE(searchString);
+ var s_len = s.length;
+ var pos = s_len;
+ if (%_ArgumentsLength() > 1) {
+ var arg = %_Arguments(1); // position
+ if (!IS_UNDEFINED(arg)) {
+ pos = ToInteger(arg);
+ }
+ }
+
+ var end = $min($max(pos, 0), s_len);
+ var ss_len = ss.length;
+ var start = end - ss_len;
+ if (start < 0) {
+ return false;
+ }
+
+ return %StringLastIndexOf(s, ss, start) === start;
+}
+
+
+// ES6 draft 04-05-14, section 21.1.3.6
+function StringIncludes(searchString /* position */) { // length == 1
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
+
+ var s = TO_STRING_INLINE(this);
+
+ if (IS_REGEXP(searchString)) {
+ throw MakeTypeError("first_argument_not_regexp",
+ ["String.prototype.includes"]);
+ }
+
+ var ss = TO_STRING_INLINE(searchString);
+ var pos = 0;
+ if (%_ArgumentsLength() > 1) {
+ pos = %_Arguments(1); // position
+ pos = ToInteger(pos);
+ }
+
+ var s_len = s.length;
+ var start = $min($max(pos, 0), s_len);
+ var ss_len = ss.length;
+ if (ss_len + start > s_len) {
+ return false;
+ }
+
+ return %StringIndexOf(s, ss, start) !== -1;
+}
+
+
+// ES6 Draft 05-22-2014, section 21.1.3.3
+function StringCodePointAt(pos) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt");
+
+ var string = TO_STRING_INLINE(this);
+ var size = string.length;
+ pos = TO_INTEGER(pos);
+ if (pos < 0 || pos >= size) {
+ return UNDEFINED;
+ }
+ var first = %_StringCharCodeAt(string, pos);
+ if (first < 0xD800 || first > 0xDBFF || pos + 1 == size) {
+ return first;
+ }
+ var second = %_StringCharCodeAt(string, pos + 1);
+ if (second < 0xDC00 || second > 0xDFFF) {
+ return first;
+ }
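+  // Same as (first - 0xD800) * 0x400 + (second - 0xDC00) + 0x10000;
+  // the constants fold because 0x10000 - 0xDC00 == 0x2400.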
+ return (first - 0xD800) * 0x400 + second + 0x2400;
+}
+
+
+// ES6 Draft 05-22-2014, section 21.1.2.2
+function StringFromCodePoint(_) { // length = 1
+ var code;
+ var length = %_ArgumentsLength();
+ var index;
+ var result = "";
+ for (index = 0; index < length; index++) {
+ code = %_Arguments(index);
+ if (!%_IsSmi(code)) {
+ code = ToNumber(code);
+ }
+ if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) {
+ throw MakeRangeError("invalid_code_point", [code]);
+ }
+ if (code <= 0xFFFF) {
+ result += %_StringCharFromCode(code);
+ } else {
+ code -= 0x10000;
+ result += %_StringCharFromCode((code >>> 10) & 0x3FF | 0xD800);
+ result += %_StringCharFromCode(code & 0x3FF | 0xDC00);
+ }
+ }
+ return result;
+}
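The two functions above are inverses: StringFromCodePoint splits an astral code point into a lead/trail surrogate pair, and StringCodePointAt reassembles it using the folded 0x2400 constant noted earlier. A standalone round-trip check, not part of the patch:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t code = 0x1F600;  // any code point above 0xFFFF
  uint32_t lead = (((code - 0x10000) >> 10) & 0x3FF) | 0xD800;   // 0xD83D
  uint32_t trail = ((code - 0x10000) & 0x3FF) | 0xDC00;          // 0xDE00
  uint32_t decoded = (lead - 0xD800) * 0x400 + trail + 0x2400;
  assert(decoded == code);
  return 0;
}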
+
+
+// -------------------------------------------------------------------
+// String methods related to templates
+
+// ES6 Draft 03-17-2015, section 21.1.2.4
+function StringRaw(callSite) {
+ // TODO(caitp): Use rest parameters when implemented
+ var numberOfSubstitutions = %_ArgumentsLength();
+ var cooked = ToObject(callSite);
+ var raw = ToObject(cooked.raw);
+ var literalSegments = ToLength(raw.length);
+ if (literalSegments <= 0) return "";
+
+ var result = ToString(raw[0]);
+
+ for (var i = 1; i < literalSegments; ++i) {
+ if (i < numberOfSubstitutions) {
+ result += ToString(%_Arguments(i));
+ }
+ result += ToString(raw[i]);
+ }
+
+ return result;
+}
+
// -------------------------------------------------------------------
-function SetUpString() {
- %CheckIsBootstrapping();
-
- // Set the String function and constructor.
- %SetCode($String, StringConstructor);
- %FunctionSetPrototype($String, new $String());
-
- // Set up the constructor property on the String prototype object.
- %AddNamedProperty($String.prototype, "constructor", $String, DONT_ENUM);
-
- // Set up the non-enumerable functions on the String object.
- InstallFunctions($String, DONT_ENUM, $Array(
- "fromCharCode", StringFromCharCode
- ));
-
- // Set up the non-enumerable functions on the String prototype object.
- InstallFunctions($String.prototype, DONT_ENUM, $Array(
- "valueOf", StringValueOf,
- "toString", StringToString,
- "charAt", StringCharAt,
- "charCodeAt", StringCharCodeAt,
- "concat", StringConcat,
- "indexOf", StringIndexOfJS,
- "lastIndexOf", StringLastIndexOfJS,
- "localeCompare", StringLocaleCompareJS,
- "match", StringMatchJS,
- "normalize", StringNormalizeJS,
- "replace", StringReplace,
- "search", StringSearch,
- "slice", StringSlice,
- "split", StringSplitJS,
- "substring", StringSubstring,
- "substr", StringSubstr,
- "toLowerCase", StringToLowerCaseJS,
- "toLocaleLowerCase", StringToLocaleLowerCase,
- "toUpperCase", StringToUpperCaseJS,
- "toLocaleUpperCase", StringToLocaleUpperCase,
- "trim", StringTrimJS,
- "trimLeft", StringTrimLeft,
- "trimRight", StringTrimRight,
- "link", StringLink,
- "anchor", StringAnchor,
- "fontcolor", StringFontcolor,
- "fontsize", StringFontsize,
- "big", StringBig,
- "blink", StringBlink,
- "bold", StringBold,
- "fixed", StringFixed,
- "italics", StringItalics,
- "small", StringSmall,
- "strike", StringStrike,
- "sub", StringSub,
- "sup", StringSup
- ));
-}
-
-SetUpString();
+// Set the String function and constructor.
+%SetCode(GlobalString, StringConstructor);
+%FunctionSetPrototype(GlobalString, new GlobalString());
+
+// Set up the constructor property on the String prototype object.
+%AddNamedProperty(
+ GlobalString.prototype, "constructor", GlobalString, DONT_ENUM);
+
+// Set up the non-enumerable functions on the String object.
+InstallFunctions(GlobalString, DONT_ENUM, GlobalArray(
+ "fromCharCode", StringFromCharCode,
+ "fromCodePoint", StringFromCodePoint,
+ "raw", StringRaw
+));
+
+// Set up the non-enumerable functions on the String prototype object.
+InstallFunctions(GlobalString.prototype, DONT_ENUM, GlobalArray(
+ "valueOf", StringValueOf,
+ "toString", StringToString,
+ "charAt", StringCharAtJS,
+ "charCodeAt", StringCharCodeAtJS,
+ "codePointAt", StringCodePointAt,
+ "concat", StringConcat,
+ "endsWith", StringEndsWith,
+ "includes", StringIncludes,
+ "indexOf", StringIndexOfJS,
+ "lastIndexOf", StringLastIndexOfJS,
+ "localeCompare", StringLocaleCompareJS,
+ "match", StringMatchJS,
+ "normalize", StringNormalizeJS,
+ "repeat", StringRepeat,
+ "replace", StringReplace,
+ "search", StringSearch,
+ "slice", StringSlice,
+ "split", StringSplitJS,
+ "substring", StringSubstring,
+ "substr", StringSubstr,
+ "startsWith", StringStartsWith,
+ "toLowerCase", StringToLowerCaseJS,
+ "toLocaleLowerCase", StringToLocaleLowerCase,
+ "toUpperCase", StringToUpperCaseJS,
+ "toLocaleUpperCase", StringToLocaleUpperCase,
+ "trim", StringTrimJS,
+ "trimLeft", StringTrimLeft,
+ "trimRight", StringTrimRight,
+
+ "link", StringLink,
+ "anchor", StringAnchor,
+ "fontcolor", StringFontcolor,
+ "fontsize", StringFontsize,
+ "big", StringBig,
+ "blink", StringBlink,
+ "bold", StringBold,
+ "fixed", StringFixed,
+ "italics", StringItalics,
+ "small", StringSmall,
+ "strike", StringStrike,
+ "sub", StringSub,
+ "sup", StringSup
+));
+
+$stringCharAt = StringCharAtJS;
+$stringIndexOf = StringIndexOfJS;
+$stringSubstring = StringSubstring;
+
+})();
diff --git a/deps/v8/src/strings-storage.cc b/deps/v8/src/strings-storage.cc
new file mode 100644
index 0000000000..1d862d6b24
--- /dev/null
+++ b/deps/v8/src/strings-storage.cc
@@ -0,0 +1,123 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/strings-storage.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+bool StringsStorage::StringsMatch(void* key1, void* key2) {
+ return strcmp(reinterpret_cast<char*>(key1), reinterpret_cast<char*>(key2)) ==
+ 0;
+}
+
+
+StringsStorage::StringsStorage(Heap* heap)
+ : hash_seed_(heap->HashSeed()), names_(StringsMatch) {}
+
+
+StringsStorage::~StringsStorage() {
+ for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+ DeleteArray(reinterpret_cast<const char*>(p->value));
+ }
+}
+
+
+const char* StringsStorage::GetCopy(const char* src) {
+ int len = static_cast<int>(strlen(src));
+ HashMap::Entry* entry = GetEntry(src, len);
+ if (entry->value == NULL) {
+ Vector<char> dst = Vector<char>::New(len + 1);
+ StrNCpy(dst, src, len);
+ dst[len] = '\0';
+ entry->key = dst.start();
+ entry->value = entry->key;
+ }
+ return reinterpret_cast<const char*>(entry->value);
+}
+
+
+const char* StringsStorage::GetFormatted(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ const char* result = GetVFormatted(format, args);
+ va_end(args);
+ return result;
+}
+
+
+const char* StringsStorage::AddOrDisposeString(char* str, int len) {
+ HashMap::Entry* entry = GetEntry(str, len);
+ if (entry->value == NULL) {
+ // New entry added.
+ entry->key = str;
+ entry->value = str;
+ } else {
+ DeleteArray(str);
+ }
+ return reinterpret_cast<const char*>(entry->value);
+}
+
+
+const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
+ Vector<char> str = Vector<char>::New(1024);
+ int len = VSNPrintF(str, format, args);
+ if (len == -1) {
+ DeleteArray(str.start());
+ return GetCopy(format);
+ }
+ return AddOrDisposeString(str.start(), len);
+}
+
+
+const char* StringsStorage::GetName(Name* name) {
+ if (name->IsString()) {
+ String* str = String::cast(name);
+ int length = Min(kMaxNameSize, str->length());
+ int actual_length = 0;
+ SmartArrayPointer<char> data = str->ToCString(
+ DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length, &actual_length);
+ return AddOrDisposeString(data.Detach(), actual_length);
+ } else if (name->IsSymbol()) {
+ return "<symbol>";
+ }
+ return "";
+}
+
+
+const char* StringsStorage::GetName(int index) {
+ return GetFormatted("%d", index);
+}
+
+
+const char* StringsStorage::GetFunctionName(Name* name) {
+ return GetName(name);
+}
+
+
+const char* StringsStorage::GetFunctionName(const char* name) {
+ return GetCopy(name);
+}
+
+
+size_t StringsStorage::GetUsedMemorySize() const {
+ size_t size = sizeof(*this);
+ size += sizeof(HashMap::Entry) * names_.capacity();
+ for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
+ size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
+ }
+ return size;
+}
+
+
+HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
+ uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
+ return names_.Lookup(const_cast<char*>(str), hash, true);
+}
+}
+} // namespace v8::internal
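StringsStorage is an interning table: GetCopy and AddOrDisposeString hand back one canonical, heap-stable pointer per distinct string and delete late duplicates. A standalone sketch of the same pattern over the standard library, not the V8 API (V8 hashes with the heap's seed and stores raw char arrays):

#include <cstdio>
#include <string>
#include <unordered_set>

class Interner {
 public:
  // Returns a pointer that stays valid for the interner's lifetime;
  // equal inputs always yield the same pointer.
  const char* GetCopy(const std::string& src) {
    return interned_.insert(src).first->c_str();
  }

 private:
  std::unordered_set<std::string> interned_;  // node-based: stable references
};

int main() {
  Interner names;
  const char* a = names.GetCopy("tick");
  const char* b = names.GetCopy("tick");
  printf("%s %s\n", a, a == b ? "shared" : "distinct");  // "tick shared"
  return 0;
}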
diff --git a/deps/v8/src/strings-storage.h b/deps/v8/src/strings-storage.h
new file mode 100644
index 0000000000..8fd9da7d3c
--- /dev/null
+++ b/deps/v8/src/strings-storage.h
@@ -0,0 +1,47 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRINGS_STORAGE_H_
+#define V8_STRINGS_STORAGE_H_
+
+#include "src/allocation.h"
+#include "src/hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+struct OffsetRange;
+
+// Provides storage for strings allocated in the C++ heap, holding them
+// forever, even if they disappear from the JS heap or external storage.
+class StringsStorage {
+ public:
+ explicit StringsStorage(Heap* heap);
+ ~StringsStorage();
+
+ const char* GetCopy(const char* src);
+ const char* GetFormatted(const char* format, ...);
+ const char* GetVFormatted(const char* format, va_list args);
+ const char* GetName(Name* name);
+ const char* GetName(int index);
+ const char* GetFunctionName(Name* name);
+ const char* GetFunctionName(const char* name);
+ size_t GetUsedMemorySize() const;
+
+ private:
+ static const int kMaxNameSize = 1024;
+
+ static bool StringsMatch(void* key1, void* key2);
+ const char* AddOrDisposeString(char* str, int len);
+ HashMap::Entry* GetEntry(const char* str, int len);
+
+ uint32_t hash_seed_;
+ HashMap names_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringsStorage);
+};
+}
+} // namespace v8::internal
+
+#endif // V8_STRINGS_STORAGE_H_
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
index d9cf79265f..7a80815229 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/symbol.js
@@ -2,13 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-"use strict";
-
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// var $Array = global.Array;
-
-// And requires following symbols to be set in the bootstrapper during genesis:
+// Expects following symbols to be set in the bootstrapper during genesis:
// - symbolHasInstance
// - symbolIsConcatSpreadable
// - symbolIsRegExp
@@ -16,7 +10,17 @@
// - symbolToStringTag
// - symbolUnscopables
-var $Symbol = global.Symbol;
+var $symbolToString;
+
+(function() {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalArray = global.Array;
+var GlobalObject = global.Object;
+var GlobalSymbol = global.Symbol;
// -------------------------------------------------------------------
@@ -77,46 +81,40 @@ function ObjectGetOwnPropertySymbols(obj) {
//-------------------------------------------------------------------
-function SetUpSymbol() {
- %CheckIsBootstrapping();
-
- %SetCode($Symbol, SymbolConstructor);
- %FunctionSetPrototype($Symbol, new $Object());
-
- InstallConstants($Symbol, $Array(
- // TODO(rossberg): expose when implemented.
- // "hasInstance", symbolHasInstance,
- // "isConcatSpreadable", symbolIsConcatSpreadable,
- // "isRegExp", symbolIsRegExp,
- "iterator", symbolIterator,
- // TODO(dslomov, caitp): Currently defined in harmony-tostring.js ---
- // Move here when shipping
- // "toStringTag", symbolToStringTag,
- "unscopables", symbolUnscopables
- ));
- InstallFunctions($Symbol, DONT_ENUM, $Array(
- "for", SymbolFor,
- "keyFor", SymbolKeyFor
- ));
-
- %AddNamedProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
- %AddNamedProperty(
- $Symbol.prototype, symbolToStringTag, "Symbol", DONT_ENUM | READ_ONLY);
- InstallFunctions($Symbol.prototype, DONT_ENUM, $Array(
- "toString", SymbolToString,
- "valueOf", SymbolValueOf
- ));
-}
-
-SetUpSymbol();
-
-
-function ExtendObject() {
- %CheckIsBootstrapping();
-
- InstallFunctions($Object, DONT_ENUM, $Array(
- "getOwnPropertySymbols", ObjectGetOwnPropertySymbols
- ));
-}
-
-ExtendObject();
+%SetCode(GlobalSymbol, SymbolConstructor);
+%FunctionSetPrototype(GlobalSymbol, new GlobalObject());
+
+InstallConstants(GlobalSymbol, GlobalArray(
+ // TODO(rossberg): expose when implemented.
+ // "hasInstance", symbolHasInstance,
+ // "isConcatSpreadable", symbolIsConcatSpreadable,
+ // "isRegExp", symbolIsRegExp,
+ "iterator", symbolIterator,
+ // TODO(dslomov, caitp): Currently defined in harmony-tostring.js ---
+ // Move here when shipping
+ // "toStringTag", symbolToStringTag,
+ "unscopables", symbolUnscopables
+));
+
+InstallFunctions(GlobalSymbol, DONT_ENUM, GlobalArray(
+ "for", SymbolFor,
+ "keyFor", SymbolKeyFor
+));
+
+%AddNamedProperty(
+ GlobalSymbol.prototype, "constructor", GlobalSymbol, DONT_ENUM);
+%AddNamedProperty(
+ GlobalSymbol.prototype, symbolToStringTag, "Symbol", DONT_ENUM | READ_ONLY);
+
+InstallFunctions(GlobalSymbol.prototype, DONT_ENUM, GlobalArray(
+ "toString", SymbolToString,
+ "valueOf", SymbolValueOf
+));
+
+InstallFunctions(GlobalObject, DONT_ENUM, GlobalArray(
+ "getOwnPropertySymbols", ObjectGetOwnPropertySymbols
+));
+
+$symbolToString = SymbolToString;
+
+})();
diff --git a/deps/v8/src/harmony-templates.js b/deps/v8/src/templates.js
index 254f434fdf..20f8af5f68 100644
--- a/deps/v8/src/harmony-templates.js
+++ b/deps/v8/src/templates.js
@@ -58,37 +58,3 @@ function GetTemplateCallSite(siteObj, rawStrings, hash) {
return SetCachedCallSite(%ObjectFreeze(siteObj), hash);
}
-
-
-// ES6 Draft 10-14-2014, section 21.1.2.4
-function StringRaw(callSite) {
- // TODO(caitp): Use rest parameters when implemented
- var numberOfSubstitutions = %_ArgumentsLength();
- var cooked = ToObject(callSite);
- var raw = ToObject(cooked.raw);
- var literalSegments = ToLength(raw.length);
- if (literalSegments <= 0) return "";
-
- var result = ToString(raw[0]);
-
- for (var i = 1; i < literalSegments; ++i) {
- if (i < numberOfSubstitutions) {
- result += ToString(%_Arguments(i));
- }
- result += ToString(raw[i]);
- }
-
- return result;
-}
-
-
-function ExtendStringForTemplates() {
- %CheckIsBootstrapping();
-
- // Set up the non-enumerable functions on the String object.
- InstallFunctions($String, DONT_ENUM, $Array(
- "raw", StringRaw
- ));
-}
-
-ExtendStringForTemplates();
diff --git a/deps/v8/src/third_party/fdlibm/fdlibm.js b/deps/v8/src/third_party/fdlibm/fdlibm.js
index 8804469905..e0373922c3 100644
--- a/deps/v8/src/third_party/fdlibm/fdlibm.js
+++ b/deps/v8/src/third_party/fdlibm/fdlibm.js
@@ -23,11 +23,20 @@
// rempio2result is used as a container for return values of %RemPiO2. It is
// initialized to a two-element Float64Array during genesis.
-"use strict";
-
var kMath;
var rempio2result;
+(function() {
+
+"use strict";
+
+%CheckIsBootstrapping();
+
+var GlobalMath = global.Math;
+var GlobalArray = global.Array;
+
+//-------------------------------------------------------------------
+
const INVPIO2 = kMath[0];
const PIO2_1 = kMath[1];
const PIO2_1T = kMath[2];
@@ -79,7 +88,7 @@ macro REMPIO2(X)
}
} else if (ix <= 0x413921fb) {
// |X| ~<= 2^19*(pi/2), medium size
- var t = MathAbs(X);
+ var t = $abs(X);
n = (t * INVPIO2 + 0.5) | 0;
var r = t - n * PIO2_1;
var w = n * PIO2_1T;
@@ -141,16 +150,18 @@ endmacro
// then
//   sin(x) = X + (S1*X^3 + (X^2*(r-Y/2)+Y))
//
-macro KSIN(x)
-kMath[7+x]
-endmacro
+const S1 = -1.66666666666666324348e-01;
+const S2 = 8.33333333332248946124e-03;
+const S3 = -1.98412698298579493134e-04;
+const S4 = 2.75573137070700676789e-06;
+const S5 = -2.50507602534068634195e-08;
+const S6 = 1.58969099521155010221e-10;
macro RETURN_KERNELSIN(X, Y, SIGN)
var z = X * X;
var v = z * X;
- var r = KSIN(1) + z * (KSIN(2) + z * (KSIN(3) +
- z * (KSIN(4) + z * KSIN(5))));
- return (X - ((z * (0.5 * Y - v * r) - Y) - v * KSIN(0))) SIGN;
+ var r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
+ return (X - ((z * (0.5 * Y - v * r) - Y) - v * S1)) SIGN;
endmacro
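Replacing the KSIN macro with named constants makes the kernel a plain Horner evaluation. A standalone check of the expanded macro, not part of the patch, taking Y = 0 (no extra-precision tail), which is the |x| < pi/4 fast path used by MathSin below:

#include <cmath>
#include <cstdio>

const double S1 = -1.66666666666666324348e-01;
const double S2 = 8.33333333332248946124e-03;
const double S3 = -1.98412698298579493134e-04;
const double S4 = 2.75573137070700676789e-06;
const double S5 = -2.50507602534068634195e-08;
const double S6 = 1.58969099521155010221e-10;

// RETURN_KERNELSIN with Y == 0 simplifies to x + v * (S1 + z * r).
double KernelSin(double x) {
  double z = x * x;
  double v = z * x;
  double r = S2 + z * (S3 + z * (S4 + z * (S5 + z * S6)));
  return x + v * (S1 + z * r);
}

int main() {
  printf("%.17g\n%.17g\n", KernelSin(0.5), sin(0.5));  // agree to ~1 ulp
  return 0;
}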
// __kernel_cos(X, Y)
@@ -185,15 +196,17 @@ endmacro
// magnitude of the latter is at least a quarter of X*X/2,
// thus, reducing the rounding error in the subtraction.
//
-macro KCOS(x)
-kMath[13+x]
-endmacro
+const C1 = 4.16666666666666019037e-02;
+const C2 = -1.38888888888741095749e-03;
+const C3 = 2.48015872894767294178e-05;
+const C4 = -2.75573143513906633035e-07;
+const C5 = 2.08757232129817482790e-09;
+const C6 = -1.13596475577881948265e-11;
macro RETURN_KERNELCOS(X, Y, SIGN)
var ix = %_DoubleHi(X) & 0x7fffffff;
var z = X * X;
- var r = z * (KCOS(0) + z * (KCOS(1) + z * (KCOS(2)+
- z * (KCOS(3) + z * (KCOS(4) + z * KCOS(5))))));
+ var r = z * (C1 + z * (C2 + z * (C3 + z * (C4 + z * (C5 + z * C6)))));
if (ix < 0x3fd33333) { // |x| ~< 0.3
return (1 - (0.5 * z - (z * r - X * Y))) SIGN;
} else {
@@ -257,7 +270,7 @@ function KernelTan(x, y, returnTan) {
if (ix < 0x3e300000) { // |x| < 2^-28
if (((ix | %_DoubleLo(x)) | (returnTan + 1)) == 0) {
// x == 0 && returnTan = -1
- return 1 / MathAbs(x);
+ return 1 / $abs(x);
} else {
if (returnTan == 1) {
return x;
@@ -336,22 +349,22 @@ function MathCosSlow(x) {
// ECMA 262 - 15.8.2.16
function MathSin(x) {
- x = x * 1; // Convert to number.
+ x = +x; // Convert to number.
if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
// |x| < pi/4, approximately. No reduction needed.
RETURN_KERNELSIN(x, 0, /* empty */);
}
- return MathSinSlow(x);
+ return +MathSinSlow(x);
}
// ECMA 262 - 15.8.2.7
function MathCos(x) {
- x = x * 1; // Convert to number.
+ x = +x; // Convert to number.
if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
// |x| < pi/4, approximately. No reduction needed.
RETURN_KERNELCOS(x, 0, /* empty */);
}
- return MathCosSlow(x);
+ return +MathCosSlow(x);
}
// ECMA 262 - 15.8.2.18
@@ -745,7 +758,7 @@ function MathSinh(x) {
x = x * 1; // Convert to number.
var h = (x < 0) ? -0.5 : 0.5;
// |x| in [0, 22]. return sign(x)*0.5*(E+E/(E+1))
- var ax = MathAbs(x);
+ var ax = $abs(x);
if (ax < 22) {
// For |x| < 2^-28, sinh(x) = x
if (ax < TWO_M28) return x;
@@ -754,11 +767,11 @@ function MathSinh(x) {
return h * (t + t / (t + 1));
}
// |x| in [22, log(maxdouble)], return 0.5 * exp(|x|)
- if (ax < LOG_MAXD) return h * MathExp(ax);
+ if (ax < LOG_MAXD) return h * $exp(ax);
// |x| in [log(maxdouble), overflowthreshold]
// overflowthreshold = 710.4758600739426
if (ax <= KSINH_OVERFLOW) {
- var w = MathExp(0.5 * ax);
+ var w = $exp(0.5 * ax);
var t = h * w;
return t * w;
}
@@ -796,7 +809,7 @@ function MathCosh(x) {
var ix = %_DoubleHi(x) & 0x7fffffff;
// |x| in [0,0.5*log2], return 1+expm1(|x|)^2/(2*exp(|x|))
if (ix < 0x3fd62e43) {
- var t = MathExpm1(MathAbs(x));
+ var t = MathExpm1($abs(x));
var w = 1 + t;
// For |x| < 2^-55, cosh(x) = 1
if (ix < 0x3c800000) return w;
@@ -804,14 +817,14 @@ function MathCosh(x) {
}
  // |x| in [0.5*log2, 22], return (exp(|x|)+1/exp(|x|))/2
if (ix < 0x40360000) {
- var t = MathExp(MathAbs(x));
+ var t = $exp($abs(x));
return 0.5 * t + 0.5 / t;
}
// |x| in [22, log(maxdouble)], return half*exp(|x|)
- if (ix < 0x40862e42) return 0.5 * MathExp(MathAbs(x));
+ if (ix < 0x40862e42) return 0.5 * $exp($abs(x));
// |x| in [log(maxdouble), overflowthreshold]
- if (MathAbs(x) <= KCOSH_OVERFLOW) {
- var w = MathExp(0.5 * MathAbs(x));
+ if ($abs(x) <= KCOSH_OVERFLOW) {
+ var w = $exp(0.5 * $abs(x));
var t = 0.5 * w;
return t * w;
}
@@ -879,7 +892,7 @@ function MathLog10(x) {
y = k + i;
x = %_ConstructDouble(hx, lx);
- z = y * LOG10_2LO + IVLN10 * MathLog(x);
+ z = y * LOG10_2LO + IVLN10 * %_MathLogRT(x);
return z + y * LOG10_2HI;
}
@@ -914,7 +927,7 @@ const TWO53 = 9007199254740992;
function MathLog2(x) {
x = x * 1; // Convert to number.
- var ax = MathAbs(x);
+ var ax = $abs(x);
var hx = %_DoubleHi(x);
var lx = %_DoubleLo(x);
var ix = hx & 0x7fffffff;
@@ -997,3 +1010,22 @@ function MathLog2(x) {
// t1 + t2 = log2(ax), sum up because we do not care about extra precision.
return t1 + t2;
}
+
+//-------------------------------------------------------------------
+
+InstallFunctions(GlobalMath, DONT_ENUM, GlobalArray(
+ "cos", MathCos,
+ "sin", MathSin,
+ "tan", MathTan,
+ "sinh", MathSinh,
+ "cosh", MathCosh,
+ "log10", MathLog10,
+ "log2", MathLog2,
+ "log1p", MathLog1p,
+ "expm1", MathExpm1
+));
+
+%SetInlineBuiltinFlag(MathSin);
+%SetInlineBuiltinFlag(MathCos);
+
+})();
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 0f46b118cd..cc7e9f8cbd 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -40,7 +40,7 @@ namespace internal {
T(COLON, ":", 0) \
T(SEMICOLON, ";", 0) \
T(PERIOD, ".", 0) \
- T(ELLIPSIS, "...", 0) \
+ T(ELLIPSIS, "...", 0) \
T(CONDITIONAL, "?", 3) \
T(INC, "++", 0) \
T(DEC, "--", 0) \
@@ -142,6 +142,7 @@ namespace internal {
K(TRUE_LITERAL, "true", 0) \
K(FALSE_LITERAL, "false", 0) \
T(NUMBER, NULL, 0) \
+ T(SMI, NULL, 0) \
T(STRING, NULL, 0) \
\
/* Identifiers (not keywords or future reserved words). */ \
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index fd8eb8b0b6..f31eff96ba 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -11,55 +11,19 @@ namespace v8 {
namespace internal {
-#define FIELD_ADDR(p, offset) \
- (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
-
-#define WRITE_FIELD(p, offset, value) \
- (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
-
-#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
- if (mode == UPDATE_WRITE_BARRIER) { \
- heap->incremental_marking()->RecordWrite( \
- object, HeapObject::RawField(object, offset), value); \
- if (heap->InNewSpace(value)) { \
- heap->RecordWrite(object->address(), offset); \
- } \
- }
-
-
TransitionArray* TransitionArray::cast(Object* object) {
DCHECK(object->IsTransitionArray());
return reinterpret_cast<TransitionArray*>(object);
}
-bool TransitionArray::HasElementsTransition() {
- return SearchSpecial(GetHeap()->elements_transition_symbol()) != kNotFound;
-}
-
-
-Object* TransitionArray::back_pointer_storage() {
- return get(kBackPointerStorageIndex);
-}
-
-
-void TransitionArray::set_back_pointer_storage(Object* back_pointer,
- WriteBarrierMode mode) {
- Heap* heap = GetHeap();
- WRITE_FIELD(this, kBackPointerStorageOffset, back_pointer);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kBackPointerStorageOffset, back_pointer, mode);
-}
-
-
bool TransitionArray::HasPrototypeTransitions() {
- return IsFullTransitionArray() &&
- get(kPrototypeTransitionsIndex) != Smi::FromInt(0);
+ return get(kPrototypeTransitionsIndex) != Smi::FromInt(0);
}
FixedArray* TransitionArray::GetPrototypeTransitions() {
- DCHECK(IsFullTransitionArray());
+ DCHECK(HasPrototypeTransitions()); // Callers must check first.
Object* prototype_transitions = get(kPrototypeTransitionsIndex);
return FixedArray::cast(prototype_transitions);
}
@@ -67,88 +31,68 @@ FixedArray* TransitionArray::GetPrototypeTransitions() {
void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
WriteBarrierMode mode) {
- DCHECK(IsFullTransitionArray());
DCHECK(transitions->IsFixedArray());
- Heap* heap = GetHeap();
- WRITE_FIELD(this, kPrototypeTransitionsOffset, transitions);
- CONDITIONAL_WRITE_BARRIER(
- heap, this, kPrototypeTransitionsOffset, transitions, mode);
+ set(kPrototypeTransitionsIndex, transitions, mode);
}
Object** TransitionArray::GetPrototypeTransitionsSlot() {
- return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kPrototypeTransitionsOffset);
+ return RawFieldOfElementAt(kPrototypeTransitionsIndex);
}
Object** TransitionArray::GetKeySlot(int transition_number) {
- DCHECK(!IsSimpleTransition());
DCHECK(transition_number < number_of_transitions());
return RawFieldOfElementAt(ToKeyIndex(transition_number));
}
Name* TransitionArray::GetKey(int transition_number) {
- if (IsSimpleTransition()) {
- Map* target = GetTarget(kSimpleTransitionIndex);
- int descriptor = target->LastAdded();
- Name* key = target->instance_descriptors()->GetKey(descriptor);
- return key;
- }
DCHECK(transition_number < number_of_transitions());
return Name::cast(get(ToKeyIndex(transition_number)));
}
+Name* TransitionArray::GetKey(Object* raw_transitions, int transition_number) {
+ if (IsSimpleTransition(raw_transitions)) {
+ DCHECK(transition_number == 0);
+ return GetSimpleTransitionKey(GetSimpleTransition(raw_transitions));
+ }
+ DCHECK(IsFullTransitionArray(raw_transitions));
+ return TransitionArray::cast(raw_transitions)->GetKey(transition_number);
+}
+
+
void TransitionArray::SetKey(int transition_number, Name* key) {
- DCHECK(!IsSimpleTransition());
DCHECK(transition_number < number_of_transitions());
set(ToKeyIndex(transition_number), key);
}
Map* TransitionArray::GetTarget(int transition_number) {
- if (IsSimpleTransition()) {
- DCHECK(transition_number == kSimpleTransitionIndex);
- return Map::cast(get(kSimpleTransitionTarget));
- }
DCHECK(transition_number < number_of_transitions());
return Map::cast(get(ToTargetIndex(transition_number)));
}
-void TransitionArray::SetTarget(int transition_number, Map* value) {
- if (IsSimpleTransition()) {
- DCHECK(transition_number == kSimpleTransitionIndex);
- return set(kSimpleTransitionTarget, value);
+Map* TransitionArray::GetTarget(Object* raw_transitions,
+ int transition_number) {
+ if (IsSimpleTransition(raw_transitions)) {
+ DCHECK(transition_number == 0);
+ return GetSimpleTransition(raw_transitions);
}
- DCHECK(transition_number < number_of_transitions());
- set(ToTargetIndex(transition_number), value);
-}
-
-
-PropertyDetails TransitionArray::GetTargetDetails(int transition_number) {
- Map* map = GetTarget(transition_number);
- return map->GetLastDescriptorDetails();
+ DCHECK(IsFullTransitionArray(raw_transitions));
+ return TransitionArray::cast(raw_transitions)->GetTarget(transition_number);
}
-Object* TransitionArray::GetTargetValue(int transition_number) {
- Map* map = GetTarget(transition_number);
- return map->instance_descriptors()->GetValue(map->LastAdded());
+void TransitionArray::SetTarget(int transition_number, Map* value) {
+ DCHECK(transition_number < number_of_transitions());
+ set(ToTargetIndex(transition_number), value);
}
int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
- if (IsSimpleTransition()) {
- Name* key = GetKey(kSimpleTransitionIndex);
- if (key->Equals(name)) return kSimpleTransitionIndex;
- if (out_insertion_index != NULL) {
- *out_insertion_index = key->Hash() > name->Hash() ? 0 : 1;
- }
- return kNotFound;
- }
return internal::Search<ALL_ENTRIES>(this, name, 0, out_insertion_index);
}
@@ -225,19 +169,10 @@ void TransitionArray::NoIncrementalWriteBarrierSet(int transition_number,
void TransitionArray::SetNumberOfTransitions(int number_of_transitions) {
- if (IsFullTransitionArray()) {
- DCHECK(number_of_transitions <= number_of_transitions_storage());
- WRITE_FIELD(this, kTransitionLengthOffset,
- Smi::FromInt(number_of_transitions));
- }
+ DCHECK(number_of_transitions <= Capacity(this));
+ set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
}
-
-#undef FIELD_ADDR
-#undef WRITE_FIELD
-#undef CONDITIONAL_WRITE_BARRIER
-
-
} } // namespace v8::internal
#endif // V8_TRANSITIONS_INL_H_
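Note on the transitions-inl.h changes: after this patch a map's raw_transitions field encodes three states, a Smi zero or cleared WeakCell (no transitions), a live WeakCell holding the target map (one inline "simple" transition), or a TransitionArray (the full format). A hedged sketch of how callers dispatch on that encoding, mirroring the NumberOfTransitions logic added in transitions.cc below; the enum and function are illustrative, not V8 types:

    // Illustrative dispatch over the three raw_transitions encodings.
    enum class Encoding { kUninitialized, kWeakCellToMap, kFullArray };

    int CountTransitions(Encoding state, int full_array_length) {
      switch (state) {
        case Encoding::kUninitialized:   // Smi(0) or cleared WeakCell
          return 0;
        case Encoding::kWeakCellToMap:   // one inline "simple" transition
          return 1;
        case Encoding::kFullArray:       // length stored in the array itself
          return full_array_length;
      }
      return 0;
    }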
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 43fc90b1b5..2e65e387de 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -12,141 +12,110 @@ namespace v8 {
namespace internal {
-Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
- int number_of_transitions,
- int slack) {
- Handle<FixedArray> array = isolate->factory()->NewFixedArray(
- LengthFor(number_of_transitions + slack));
- array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
- array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
- return Handle<TransitionArray>::cast(array);
-}
-
-
-Handle<TransitionArray> TransitionArray::AllocateSimple(Isolate* isolate,
- Handle<Map> target) {
- Handle<FixedArray> array =
- isolate->factory()->NewFixedArray(kSimpleTransitionSize);
- array->set(kSimpleTransitionTarget, *target);
- return Handle<TransitionArray>::cast(array);
-}
-
-
-void TransitionArray::NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
- int origin_transition,
- int target_transition) {
- NoIncrementalWriteBarrierSet(target_transition,
- origin->GetKey(origin_transition),
- origin->GetTarget(origin_transition));
-}
-
-
-Handle<TransitionArray> TransitionArray::NewWith(Handle<Map> map,
- Handle<Name> name,
- Handle<Map> target,
- SimpleTransitionFlag flag) {
- Handle<TransitionArray> result;
- Isolate* isolate = name->GetIsolate();
-
- if (flag == SIMPLE_PROPERTY_TRANSITION) {
- result = AllocateSimple(isolate, target);
- } else {
- result = Allocate(isolate, 1);
- result->NoIncrementalWriteBarrierSet(0, *name, *target);
- }
- result->set_back_pointer_storage(map->GetBackPointer());
- return result;
-}
-
-
-Handle<TransitionArray> TransitionArray::ExtendToFullTransitionArray(
- Handle<Map> containing_map) {
- DCHECK(!containing_map->transitions()->IsFullTransitionArray());
- int nof = containing_map->transitions()->number_of_transitions();
-
- // A transition array may shrink during GC.
- Handle<TransitionArray> result = Allocate(containing_map->GetIsolate(), nof);
- DisallowHeapAllocation no_gc;
- int new_nof = containing_map->transitions()->number_of_transitions();
- if (new_nof != nof) {
- DCHECK(new_nof == 0);
- result->Shrink(ToKeyIndex(0));
- result->SetNumberOfTransitions(0);
- } else if (nof == 1) {
- result->NoIncrementalWriteBarrierCopyFrom(
- containing_map->transitions(), kSimpleTransitionIndex, 0);
+// static
+void TransitionArray::Insert(Handle<Map> map, Handle<Name> name,
+ Handle<Map> target, SimpleTransitionFlag flag) {
+ Isolate* isolate = map->GetIsolate();
+ target->SetBackPointer(*map);
+
+ // If the map doesn't have any transitions at all yet, install the new one.
+ if (CanStoreSimpleTransition(map->raw_transitions())) {
+ if (flag == SIMPLE_PROPERTY_TRANSITION) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(target);
+ ReplaceTransitions(map, *cell);
+ return;
+ }
+ // If the flag requires a full TransitionArray, allocate one.
+ Handle<TransitionArray> result = Allocate(isolate, 0, 1);
+ ReplaceTransitions(map, *result);
}
- result->set_back_pointer_storage(
- containing_map->transitions()->back_pointer_storage());
- return result;
-}
-
-
-Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
- Handle<Name> name,
- Handle<Map> target,
- SimpleTransitionFlag flag) {
- if (!map->HasTransitionArray()) {
- return TransitionArray::NewWith(map, name, target, flag);
+ bool is_special_transition = flag == SPECIAL_TRANSITION;
+ // If the map has a simple transition, check if it should be overwritten.
+ if (IsSimpleTransition(map->raw_transitions())) {
+ Map* old_target = GetSimpleTransition(map->raw_transitions());
+ Name* key = GetSimpleTransitionKey(old_target);
+ PropertyDetails old_details = GetSimpleTargetDetails(old_target);
+ PropertyDetails new_details = is_special_transition
+ ? PropertyDetails::Empty()
+ : GetTargetDetails(*name, *target);
+ if (flag == SIMPLE_PROPERTY_TRANSITION && key->Equals(*name) &&
+ old_details.kind() == new_details.kind() &&
+ old_details.attributes() == new_details.attributes()) {
+ Handle<WeakCell> cell = Map::WeakCellForMap(target);
+ ReplaceTransitions(map, *cell);
+ return;
+ }
+ // Otherwise allocate a full TransitionArray with slack for a new entry.
+ Handle<TransitionArray> result = Allocate(isolate, 1, 1);
+ // Re-read existing data; the allocation might have caused it to be cleared.
+ if (IsSimpleTransition(map->raw_transitions())) {
+ old_target = GetSimpleTransition(map->raw_transitions());
+ result->NoIncrementalWriteBarrierSet(
+ 0, GetSimpleTransitionKey(old_target), old_target);
+ } else {
+ result->SetNumberOfTransitions(0);
+ }
+ ReplaceTransitions(map, *result);
}
- int number_of_transitions = map->transitions()->number_of_transitions();
- int new_nof = number_of_transitions;
+ // At this point, we know that the map has a full TransitionArray.
+ DCHECK(IsFullTransitionArray(map->raw_transitions()));
- bool is_special_transition = flag == SPECIAL_TRANSITION;
+ int number_of_transitions = 0;
+ int new_nof = 0;
+ int insertion_index = kNotFound;
DCHECK_EQ(is_special_transition, IsSpecialTransition(*name));
PropertyDetails details = is_special_transition
- ? PropertyDetails(NONE, DATA, 0)
+ ? PropertyDetails::Empty()
: GetTargetDetails(*name, *target);
- int insertion_index = kNotFound;
- int index =
- is_special_transition
- ? map->transitions()->SearchSpecial(Symbol::cast(*name),
- &insertion_index)
- : map->transitions()->Search(details.kind(), *name,
- details.attributes(), &insertion_index);
- if (index == kNotFound) {
- ++new_nof;
- } else {
- insertion_index = index;
- }
- DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
-
- CHECK(new_nof <= kMaxNumberOfTransitions);
-
- if (new_nof <= map->transitions()->number_of_transitions_storage()) {
+ {
DisallowHeapAllocation no_gc;
- TransitionArray* array = map->transitions();
+ TransitionArray* array = TransitionArray::cast(map->raw_transitions());
+ number_of_transitions = array->number_of_transitions();
+ new_nof = number_of_transitions;
+ int index =
+ is_special_transition
+ ? array->SearchSpecial(Symbol::cast(*name), &insertion_index)
+ : array->Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
+ // If an existing entry was found, overwrite it and return.
if (index != kNotFound) {
array->SetTarget(index, *target);
- return handle(array);
+ return;
}
- array->SetNumberOfTransitions(new_nof);
- for (index = number_of_transitions; index > insertion_index; --index) {
- Name* key = array->GetKey(index - 1);
- array->SetKey(index, key);
- array->SetTarget(index, array->GetTarget(index - 1));
+ ++new_nof;
+ CHECK(new_nof <= kMaxNumberOfTransitions);
+ DCHECK(insertion_index >= 0 && insertion_index <= number_of_transitions);
+
+ // If there is enough capacity, insert new entry into the existing array.
+ if (new_nof <= Capacity(array)) {
+ array->SetNumberOfTransitions(new_nof);
+ for (index = number_of_transitions; index > insertion_index; --index) {
+ array->SetKey(index, array->GetKey(index - 1));
+ array->SetTarget(index, array->GetTarget(index - 1));
+ }
+ array->SetKey(index, *name);
+ array->SetTarget(index, *target);
+ SLOW_DCHECK(array->IsSortedNoDuplicates());
+ return;
}
- array->SetKey(index, *name);
- array->SetTarget(index, *target);
- SLOW_DCHECK(array->IsSortedNoDuplicates());
- return handle(array);
}
+ // We're gonna need a bigger TransitionArray.
Handle<TransitionArray> result = Allocate(
map->GetIsolate(), new_nof,
Map::SlackForArraySize(number_of_transitions, kMaxNumberOfTransitions));
- // The map's transition array may grown smaller during the allocation above as
+ // The map's transition array may have shrunk during the allocation above as
// it was weakly traversed, though it is guaranteed not to disappear. Trim the
// result copy if needed, and recompute variables.
- DCHECK(map->HasTransitionArray());
+ DCHECK(IsFullTransitionArray(map->raw_transitions()));
DisallowHeapAllocation no_gc;
- TransitionArray* array = map->transitions();
+ TransitionArray* array = TransitionArray::cast(map->raw_transitions());
if (array->number_of_transitions() != number_of_transitions) {
DCHECK(array->number_of_transitions() < number_of_transitions);
@@ -154,11 +123,11 @@ Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
new_nof = number_of_transitions;
insertion_index = kNotFound;
- index = is_special_transition ? map->transitions()->SearchSpecial(
- Symbol::cast(*name), &insertion_index)
- : map->transitions()->Search(
- details.kind(), *name,
- details.attributes(), &insertion_index);
+ int index =
+ is_special_transition
+ ? array->SearchSpecial(Symbol::cast(*name), &insertion_index)
+ : array->Search(details.kind(), *name, details.attributes(),
+ &insertion_index);
if (index == kNotFound) {
++new_nof;
} else {
@@ -183,12 +152,332 @@ Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
result->NoIncrementalWriteBarrierCopyFrom(array, i, i + 1);
}
- result->set_back_pointer_storage(array->back_pointer_storage());
SLOW_DCHECK(result->IsSortedNoDuplicates());
- return result;
+ ReplaceTransitions(map, *result);
+}
+
+
+// static
+Map* TransitionArray::SearchTransition(Map* map, PropertyKind kind, Name* name,
+ PropertyAttributes attributes) {
+ Object* raw_transitions = map->raw_transitions();
+ if (IsSimpleTransition(raw_transitions)) {
+ Map* target = GetSimpleTransition(raw_transitions);
+ Name* key = GetSimpleTransitionKey(target);
+ if (!key->Equals(name)) return NULL;
+ PropertyDetails details = GetSimpleTargetDetails(target);
+ if (details.attributes() != attributes) return NULL;
+ if (details.kind() != kind) return NULL;
+ return target;
+ }
+ if (IsFullTransitionArray(raw_transitions)) {
+ TransitionArray* transitions = TransitionArray::cast(raw_transitions);
+ int transition = transitions->Search(kind, name, attributes);
+ if (transition == kNotFound) return NULL;
+ return transitions->GetTarget(transition);
+ }
+ return NULL;
}
+// static
+Map* TransitionArray::SearchSpecial(Map* map, Symbol* name) {
+ Object* raw_transitions = map->raw_transitions();
+ if (IsFullTransitionArray(raw_transitions)) {
+ TransitionArray* transitions = TransitionArray::cast(raw_transitions);
+ int transition = transitions->SearchSpecial(name);
+ if (transition == kNotFound) return NULL;
+ return transitions->GetTarget(transition);
+ }
+ return NULL;
+}
+
+
+// static
+Handle<Map> TransitionArray::FindTransitionToField(Handle<Map> map,
+ Handle<Name> name) {
+ DisallowHeapAllocation no_gc;
+ Map* target = SearchTransition(*map, kData, *name, NONE);
+ if (target == NULL) return Handle<Map>::null();
+ PropertyDetails details = target->GetLastDescriptorDetails();
+ DCHECK_EQ(NONE, details.attributes());
+ if (details.type() != DATA) return Handle<Map>::null();
+ return Handle<Map>(target);
+}
+
+
+// static
+Handle<String> TransitionArray::ExpectedTransitionKey(Handle<Map> map) {
+ DisallowHeapAllocation no_gc;
+ Object* raw_transition = map->raw_transitions();
+ if (!IsSimpleTransition(raw_transition)) return Handle<String>::null();
+ Map* target = GetSimpleTransition(raw_transition);
+ PropertyDetails details = GetSimpleTargetDetails(target);
+ if (details.type() != DATA) return Handle<String>::null();
+ if (details.attributes() != NONE) return Handle<String>::null();
+ Name* name = GetSimpleTransitionKey(target);
+ if (!name->IsString()) return Handle<String>::null();
+ return Handle<String>(String::cast(name));
+}
+
+
+// static
+bool TransitionArray::CanHaveMoreTransitions(Handle<Map> map) {
+ Object* raw_transitions = map->raw_transitions();
+ if (IsFullTransitionArray(raw_transitions)) {
+ TransitionArray* transitions = TransitionArray::cast(raw_transitions);
+ return transitions->number_of_transitions() < kMaxNumberOfTransitions;
+ }
+ return true;
+}
+
+
+// static
+Handle<Map> TransitionArray::PutPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype,
+ Handle<Map> target_map) {
+ DCHECK(HeapObject::cast(*prototype)->map()->IsMap());
+ // Don't cache prototype transition if this map is either shared, or a map of
+ // a prototype.
+ if (map->is_prototype_map()) return map;
+ if (map->is_dictionary_map() || !FLAG_cache_prototype_transitions) return map;
+
+ const int header = kProtoTransitionHeaderSize;
+
+ Handle<FixedArray> cache(GetPrototypeTransitions(*map));
+ int capacity = cache->length() - header;
+ int transitions = NumberOfPrototypeTransitions(*cache) + 1;
+
+ if (transitions > capacity) {
+ // Grow array by factor 2 up to MaxCachedPrototypeTransitions.
+ int new_capacity = Min(kMaxCachedPrototypeTransitions, transitions * 2);
+ if (new_capacity == capacity) return map;
+
+ cache = FixedArray::CopySize(cache, header + new_capacity);
+ if (capacity < 0) {
+ // There was no prototype transitions array before, so the size
+ // couldn't be copied. Initialize it explicitly.
+ SetNumberOfPrototypeTransitions(*cache, 0);
+ }
+
+ SetPrototypeTransitions(map, cache);
+ }
+
+ // Reload number of transitions as GC might shrink them.
+ int last = NumberOfPrototypeTransitions(*cache);
+ int entry = header + last;
+
+ cache->set(entry, *target_map);
+ SetNumberOfPrototypeTransitions(*cache, last + 1);
+
+ return map;
+}
+
+
+// static
+Handle<Map> TransitionArray::GetPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype) {
+ DisallowHeapAllocation no_gc;
+ FixedArray* cache = GetPrototypeTransitions(*map);
+ int number_of_transitions = NumberOfPrototypeTransitions(cache);
+ for (int i = 0; i < number_of_transitions; i++) {
+ Map* target = Map::cast(cache->get(kProtoTransitionHeaderSize + i));
+ if (target->prototype() == *prototype) return handle(target);
+ }
+ return Handle<Map>();
+}
+
+
+// static
+FixedArray* TransitionArray::GetPrototypeTransitions(Map* map) {
+ Object* raw_transitions = map->raw_transitions();
+ Heap* heap = map->GetHeap();
+ if (!IsFullTransitionArray(raw_transitions)) {
+ return heap->empty_fixed_array();
+ }
+ TransitionArray* transitions = TransitionArray::cast(raw_transitions);
+ if (!transitions->HasPrototypeTransitions()) {
+ return heap->empty_fixed_array();
+ }
+ return transitions->GetPrototypeTransitions();
+}
+
+
+// static
+void TransitionArray::SetNumberOfPrototypeTransitions(
+ FixedArray* proto_transitions, int value) {
+ DCHECK(proto_transitions->length() != 0);
+ proto_transitions->set(kProtoTransitionNumberOfEntriesOffset,
+ Smi::FromInt(value));
+}
+
+
+// static
+int TransitionArray::NumberOfTransitions(Object* raw_transitions) {
+ if (CanStoreSimpleTransition(raw_transitions)) return 0;
+ if (IsSimpleTransition(raw_transitions)) return 1;
+ DCHECK(IsFullTransitionArray(raw_transitions));
+ return TransitionArray::cast(raw_transitions)->number_of_transitions();
+}
+
+
+// static
+int TransitionArray::Capacity(Object* raw_transitions) {
+ if (!IsFullTransitionArray(raw_transitions)) return 1;
+ TransitionArray* t = TransitionArray::cast(raw_transitions);
+ if (t->length() <= kFirstIndex) return 0;
+ return (t->length() - kFirstIndex) / kTransitionSize;
+}
+
+
+// Private static helper functions.
+
+Handle<TransitionArray> TransitionArray::Allocate(Isolate* isolate,
+ int number_of_transitions,
+ int slack) {
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(
+ LengthFor(number_of_transitions + slack));
+ array->set(kPrototypeTransitionsIndex, Smi::FromInt(0));
+ array->set(kTransitionLengthIndex, Smi::FromInt(number_of_transitions));
+ return Handle<TransitionArray>::cast(array);
+}
+
+
+void TransitionArray::NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin,
+ int origin_transition,
+ int target_transition) {
+ NoIncrementalWriteBarrierSet(target_transition,
+ origin->GetKey(origin_transition),
+ origin->GetTarget(origin_transition));
+}
+
+
+static void ZapTransitionArray(TransitionArray* transitions) {
+ MemsetPointer(transitions->data_start(),
+ transitions->GetHeap()->the_hole_value(),
+ transitions->length());
+}
+
+
+void TransitionArray::ReplaceTransitions(Handle<Map> map,
+ Object* new_transitions) {
+ Object* raw_transitions = map->raw_transitions();
+ if (IsFullTransitionArray(raw_transitions)) {
+ TransitionArray* old_transitions = TransitionArray::cast(raw_transitions);
+#ifdef DEBUG
+ CheckNewTransitionsAreConsistent(map, old_transitions, new_transitions);
+ DCHECK(old_transitions != new_transitions);
+#endif
+ // Transition arrays are not shared. When one is replaced, it should not
+ // keep referenced objects alive, so we zap it.
+ // When there is another reference to the array somewhere (e.g. a handle),
+ // not zapping turns from a waste of memory into a source of crashes.
+ ZapTransitionArray(old_transitions);
+ }
+ map->set_raw_transitions(new_transitions);
+}
+
+
+static void ZapPrototypeTransitions(Object* raw_transitions) {
+ DCHECK(TransitionArray::IsFullTransitionArray(raw_transitions));
+ TransitionArray* transitions = TransitionArray::cast(raw_transitions);
+ if (!transitions->HasPrototypeTransitions()) return;
+ FixedArray* proto_transitions = transitions->GetPrototypeTransitions();
+ MemsetPointer(proto_transitions->data_start(),
+ proto_transitions->GetHeap()->the_hole_value(),
+ proto_transitions->length());
+}
+
+
+void TransitionArray::SetPrototypeTransitions(
+ Handle<Map> map, Handle<FixedArray> proto_transitions) {
+ EnsureHasFullTransitionArray(map);
+ if (Heap::ShouldZapGarbage()) {
+ Object* raw_transitions = map->raw_transitions();
+ DCHECK(raw_transitions != *proto_transitions);
+ ZapPrototypeTransitions(raw_transitions);
+ }
+ TransitionArray* transitions = TransitionArray::cast(map->raw_transitions());
+ transitions->SetPrototypeTransitions(*proto_transitions);
+}
+
+
+void TransitionArray::EnsureHasFullTransitionArray(Handle<Map> map) {
+ Object* raw_transitions = map->raw_transitions();
+ if (IsFullTransitionArray(raw_transitions)) return;
+ int nof = IsSimpleTransition(raw_transitions) ? 1 : 0;
+ Handle<TransitionArray> result = Allocate(map->GetIsolate(), nof);
+ DisallowHeapAllocation no_gc;
+ // Reload pointer after the allocation that just happened.
+ raw_transitions = map->raw_transitions();
+ int new_nof = IsSimpleTransition(raw_transitions) ? 1 : 0;
+ if (new_nof != nof) {
+ DCHECK(new_nof == 0);
+ result->Shrink(ToKeyIndex(0));
+ result->SetNumberOfTransitions(0);
+ } else if (nof == 1) {
+ Map* target = GetSimpleTransition(raw_transitions);
+ Name* key = GetSimpleTransitionKey(target);
+ result->NoIncrementalWriteBarrierSet(0, key, target);
+ }
+ ReplaceTransitions(map, *result);
+}
+
+
+void TransitionArray::TraverseTransitionTreeInternal(Map* map,
+ TraverseCallback callback,
+ void* data) {
+ Object* raw_transitions = map->raw_transitions();
+ if (IsFullTransitionArray(raw_transitions)) {
+ TransitionArray* transitions = TransitionArray::cast(raw_transitions);
+ if (transitions->HasPrototypeTransitions()) {
+ FixedArray* proto_trans = transitions->GetPrototypeTransitions();
+ for (int i = 0; i < NumberOfPrototypeTransitions(proto_trans); ++i) {
+ int index = TransitionArray::kProtoTransitionHeaderSize + i;
+ TraverseTransitionTreeInternal(Map::cast(proto_trans->get(index)),
+ callback, data);
+ }
+ }
+ for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+ TraverseTransitionTreeInternal(transitions->GetTarget(i), callback, data);
+ }
+ } else if (IsSimpleTransition(raw_transitions)) {
+ TraverseTransitionTreeInternal(GetSimpleTransition(raw_transitions),
+ callback, data);
+ }
+ callback(map, data);
+}
+
+
+#ifdef DEBUG
+void TransitionArray::CheckNewTransitionsAreConsistent(
+ Handle<Map> map, TransitionArray* old_transitions, Object* transitions) {
+ // This function only handles full transition arrays.
+ DCHECK(IsFullTransitionArray(transitions));
+ TransitionArray* new_transitions = TransitionArray::cast(transitions);
+ for (int i = 0; i < old_transitions->number_of_transitions(); i++) {
+ Map* target = old_transitions->GetTarget(i);
+ if (target->instance_descriptors() == map->instance_descriptors()) {
+ Name* key = old_transitions->GetKey(i);
+ int new_target_index;
+ if (TransitionArray::IsSpecialTransition(key)) {
+ new_target_index = new_transitions->SearchSpecial(Symbol::cast(key));
+ } else {
+ PropertyDetails details =
+ TransitionArray::GetTargetDetails(key, target);
+ new_target_index =
+ new_transitions->Search(details.kind(), key, details.attributes());
+ }
+ DCHECK_NE(TransitionArray::kNotFound, new_target_index);
+ DCHECK_EQ(target, new_transitions->GetTarget(new_target_index));
+ }
+ }
+}
+#endif
+
+
+// Private non-static helper functions (operating on full transition arrays).
+
int TransitionArray::SearchDetails(int transition, PropertyKind kind,
PropertyAttributes attributes,
int* out_insertion_index) {
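Note on PutPrototypeTransition above: the per-map prototype-transition cache grows geometrically and bails out once the cap is reached. A small sketch of that growth rule under the constants in this diff (header size 1, cap of 256); the function name is illustrative:

    // Growth rule used by PutPrototypeTransition (sketch).
    // Returns the new capacity, or -1 when the cache refuses to grow.
    int GrowPrototypeCache(int capacity, int transitions) {
      const int kMaxCachedPrototypeTransitions = 256;
      if (transitions <= capacity) return capacity;  // fits already
      int doubled = transitions * 2;
      int new_capacity = doubled < kMaxCachedPrototypeTransitions
                             ? doubled
                             : kMaxCachedPrototypeTransitions;
      if (new_capacity == capacity) return -1;  // at the cap; give up
      return new_capacity;
    }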
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 999ad86c55..1cb91a222e 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -16,47 +16,99 @@ namespace internal {
// TransitionArrays are fixed arrays used to hold map transitions for property,
-// constant, and element changes. They can either be simple transition arrays
-// that store a single property transition, or a full transition array that has
+// constant, and element changes. "Simple" transitions storing only a single
+// property transition are stored inline (i.e. the target map is stored
+// directly); otherwise a full transition array is used that has
// prototype transitions and multiple property transitions. The details related
// to property transitions are accessed in the descriptor array of the target
// map. In the case of a simple transition, the key is also read from the
// descriptor array of the target map.
//
-// The simple format of the these objects is:
-// [0] Undefined or back pointer map
-// [1] Single transition
+// This class provides a static interface that operates directly on maps
+// and handles the distinction between simple and full transitions storage.
//
// The full format is:
-// [0] Undefined or back pointer map
-// [1] Smi(0) or fixed array of prototype transitions
-// [2] Number of transitions
-// [3] First transition
-// [3 + number of transitions * kTransitionSize]: start of slack
+// [0] Smi(0) or fixed array of prototype transitions
+// [1] Number of transitions
+// [2] First transition
+// [2 + number of transitions * kTransitionSize]: start of slack
class TransitionArray: public FixedArray {
public:
- // Accessors for fetching instance transition at transition number.
- inline Name* GetKey(int transition_number);
- inline void SetKey(int transition_number, Name* value);
- inline Object** GetKeySlot(int transition_number);
- int GetSortedKeyIndex(int transition_number) { return transition_number; }
+ // Insert a new transition into |map|'s transition array, extending it
+ // as necessary.
+ static void Insert(Handle<Map> map, Handle<Name> name, Handle<Map> target,
+ SimpleTransitionFlag flag);
- Name* GetSortedKey(int transition_number) {
- return GetKey(transition_number);
- }
+ static Map* SearchTransition(Map* map, PropertyKind kind, Name* name,
+ PropertyAttributes attributes);
- inline Map* GetTarget(int transition_number);
- inline void SetTarget(int transition_number, Map* target);
+ static Map* SearchSpecial(Map* map, Symbol* name);
- inline PropertyDetails GetTargetDetails(int transition_number);
- inline Object* GetTargetValue(int transition_number);
+ static Handle<Map> FindTransitionToField(Handle<Map> map, Handle<Name> name);
- inline bool HasElementsTransition();
+ static Handle<String> ExpectedTransitionKey(Handle<Map> map);
- inline Object* back_pointer_storage();
- inline void set_back_pointer_storage(
- Object* back_pointer,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ static Handle<Map> ExpectedTransitionTarget(Handle<Map> map) {
+ DCHECK(!ExpectedTransitionKey(map).is_null());
+ return Handle<Map>(GetSimpleTransition(map->raw_transitions()));
+ }
+ // Returns true if |raw_transition| can be overwritten with a simple
+ // transition (because it's either uninitialized, or has been cleared).
+ static inline bool CanStoreSimpleTransition(Object* raw_transition) {
+ return raw_transition->IsSmi() ||
+ (raw_transition->IsWeakCell() &&
+ WeakCell::cast(raw_transition)->cleared());
+ }
+ static inline bool IsSimpleTransition(Object* raw_transition) {
+ DCHECK(!raw_transition->IsWeakCell() ||
+ WeakCell::cast(raw_transition)->cleared() ||
+ WeakCell::cast(raw_transition)->value()->IsMap());
+ return raw_transition->IsWeakCell() &&
+ !WeakCell::cast(raw_transition)->cleared();
+ }
+ static inline Map* GetSimpleTransition(Object* raw_transition) {
+ DCHECK(IsSimpleTransition(raw_transition));
+ DCHECK(raw_transition->IsWeakCell());
+ return Map::cast(WeakCell::cast(raw_transition)->value());
+ }
+ static inline bool IsFullTransitionArray(Object* raw_transitions) {
+ return raw_transitions->IsTransitionArray();
+ }
+
+ // The size of transition arrays is limited so they do not end up in large
+ // object space. Otherwise ClearNonLiveReferences would leak memory while
+ // applying in-place right trimming.
+ static bool CanHaveMoreTransitions(Handle<Map> map);
+
+ // ===== PROTOTYPE TRANSITIONS =====
+ // When you set the prototype of an object using the __proto__ accessor you
+ // need a new map for the object (the prototype is stored in the map). In
+ // order not to multiply maps unnecessarily we store these as transitions in
+ // the original map. That way we can transition to the same map if the same
+ // prototype is set, rather than creating a new map every time. The
+ // transitions are in the form of a map where the keys are prototype objects
+ // and the values are the maps they transition to.
+ // Cache format:
+ // 0: finger - index of the first free cell in the cache
+ // 1 + i: target map
+ static const int kMaxCachedPrototypeTransitions = 256;
+ static Handle<Map> PutPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype,
+ Handle<Map> target_map);
+
+ static Handle<Map> GetPrototypeTransition(Handle<Map> map,
+ Handle<Object> prototype);
+
+ static FixedArray* GetPrototypeTransitions(Map* map);
+
+ static int NumberOfPrototypeTransitions(FixedArray* proto_transitions) {
+ if (proto_transitions->length() == 0) return 0;
+ Object* raw = proto_transitions->get(kProtoTransitionNumberOfEntriesOffset);
+ return Smi::cast(raw)->value();
+ }
+
+ static void SetNumberOfPrototypeTransitions(FixedArray* proto_transitions,
+ int value);
inline FixedArray* GetPrototypeTransitions();
inline void SetPrototypeTransitions(
@@ -65,133 +117,92 @@ class TransitionArray: public FixedArray {
inline Object** GetPrototypeTransitionsSlot();
inline bool HasPrototypeTransitions();
- // Returns the number of transitions in the array.
- int number_of_transitions() {
- if (IsSimpleTransition()) return 1;
- if (length() <= kFirstIndex) return 0;
- return Smi::cast(get(kTransitionLengthIndex))->value();
- }
+ // ===== ITERATION =====
- int number_of_transitions_storage() {
- if (IsSimpleTransition()) return 1;
- if (length() <= kFirstIndex) return 0;
- return (length() - kFirstIndex) / kTransitionSize;
- }
+ typedef void (*TraverseCallback)(Map* map, void* data);
- int NumberOfSlackTransitions() {
- return number_of_transitions_storage() - number_of_transitions();
+ // Traverse the transition tree in postorder.
+ static void TraverseTransitionTree(Map* map, TraverseCallback callback,
+ void* data) {
+ // Make sure that we do not allocate in the callback.
+ DisallowHeapAllocation no_allocation;
+ TraverseTransitionTreeInternal(map, callback, data);
}
- inline void SetNumberOfTransitions(int number_of_transitions);
- inline int number_of_entries() { return number_of_transitions(); }
+ // ===== LOW-LEVEL ACCESSORS =====
- // Creates a FullTransitionArray from a SimpleTransitionArray in
- // containing_map.
- static Handle<TransitionArray> ExtendToFullTransitionArray(
- Handle<Map> containing_map);
-
- // Return a transition array, using the array from the owning map if it
- // already has one (copying into a larger array if necessary), otherwise
- // creating a new one according to flag.
- // TODO(verwaest): This should not cause an existing transition to be
- // overwritten.
- static Handle<TransitionArray> Insert(Handle<Map> map, Handle<Name> name,
- Handle<Map> target,
- SimpleTransitionFlag flag);
- // Search a transition for a given kind, property name and attributes.
- int Search(PropertyKind kind, Name* name, PropertyAttributes attributes,
- int* out_insertion_index = NULL);
+ // Accessors for fetching instance transition at transition number.
+ static inline Name* GetKey(Object* raw_transitions, int transition_number);
+ inline Name* GetKey(int transition_number);
+ inline void SetKey(int transition_number, Name* value);
+ inline Object** GetKeySlot(int transition_number);
+ int GetSortedKeyIndex(int transition_number) { return transition_number; }
- // Search a non-property transition (like elements kind, observe or frozen
- // transitions).
- inline int SearchSpecial(Symbol* symbol, int* out_insertion_index = NULL) {
- return SearchName(symbol, out_insertion_index);
+ Name* GetSortedKey(int transition_number) {
+ return GetKey(transition_number);
}
+ static inline Map* GetTarget(Object* raw_transitions, int transition_number);
+ inline Map* GetTarget(int transition_number);
+ inline void SetTarget(int transition_number, Map* target);
+
static inline PropertyDetails GetTargetDetails(Name* name, Map* target);
- // Allocates a TransitionArray.
- static Handle<TransitionArray> Allocate(Isolate* isolate,
- int number_of_transitions,
- int slack = 0);
+ // Returns the number of transitions in the array.
+ static int NumberOfTransitions(Object* raw_transitions);
+ // Required for templatized Search interface.
+ inline int number_of_entries() { return number_of_transitions(); }
- bool IsSimpleTransition() {
- return length() == kSimpleTransitionSize &&
- get(kSimpleTransitionTarget)->IsHeapObject() &&
- // The IntrusivePrototypeTransitionIterator may have set the map of the
- // prototype transitions array to a smi. In that case, there are
- // prototype transitions, hence this transition array is a full
- // transition array.
- HeapObject::cast(get(kSimpleTransitionTarget))->map()->IsMap() &&
- get(kSimpleTransitionTarget)->IsMap();
- }
+ inline void SetNumberOfTransitions(int number_of_transitions);
- bool IsFullTransitionArray() {
- return length() > kFirstIndex ||
- (length() == kFirstIndex && !IsSimpleTransition());
- }
+ static int Capacity(Object* raw_transitions);
// Casting.
static inline TransitionArray* cast(Object* obj);
- // Constant for denoting key was not found.
- static const int kNotFound = -1;
-
- static const int kBackPointerStorageIndex = 0;
-
- // Layout for full transition arrays.
- static const int kPrototypeTransitionsIndex = 1;
- static const int kTransitionLengthIndex = 2;
- static const int kFirstIndex = 3;
-
- // Layout for simple transition arrays.
- static const int kSimpleTransitionTarget = 1;
- static const int kSimpleTransitionSize = 2;
- static const int kSimpleTransitionIndex = 0;
- STATIC_ASSERT(kSimpleTransitionIndex != kNotFound);
-
- static const int kBackPointerStorageOffset = FixedArray::kHeaderSize;
-
- // Layout for the full transition array header.
- static const int kPrototypeTransitionsOffset = kBackPointerStorageOffset +
- kPointerSize;
- static const int kTransitionLengthOffset =
- kPrototypeTransitionsOffset + kPointerSize;
-
- // Layout of map transition entries in full transition arrays.
- static const int kTransitionKey = 0;
- static const int kTransitionTarget = 1;
static const int kTransitionSize = 2;
+ static const int kProtoTransitionHeaderSize = 1;
#if defined(DEBUG) || defined(OBJECT_PRINT)
// For our gdb macros, we should perhaps change these in the future.
void Print();
// Print all the transitions.
- void PrintTransitions(std::ostream& os, bool print_header = true); // NOLINT
+ static void PrintTransitions(std::ostream& os, Object* transitions,
+ bool print_header = true); // NOLINT
#endif
#ifdef DEBUG
bool IsSortedNoDuplicates(int valid_entries = -1);
- bool IsConsistentWithBackPointers(Map* current_map);
- bool IsEqualTo(TransitionArray* other);
+ static bool IsSortedNoDuplicates(Map* map);
+ static bool IsConsistentWithBackPointers(Map* map);
// Returns true for non-property transitions like elements kind, observed
// or frozen transitions.
static inline bool IsSpecialTransition(Name* name);
#endif
+ // Constant for denoting key was not found.
+ static const int kNotFound = -1;
+
// The maximum number of transitions we want in a transition array (should
// fit in a page).
static const int kMaxNumberOfTransitions = 1024 + 512;
- // Returns the fixed array length required to hold number_of_transitions
- // transitions.
- static int LengthFor(int number_of_transitions) {
- return ToKeyIndex(number_of_transitions);
- }
-
private:
+ // Layout for full transition arrays.
+ static const int kPrototypeTransitionsIndex = 0;
+ static const int kTransitionLengthIndex = 1;
+ static const int kFirstIndex = 2;
+
+ // Layout of map transition entries in full transition arrays.
+ static const int kTransitionKey = 0;
+ static const int kTransitionTarget = 1;
+ STATIC_ASSERT(kTransitionSize == 2);
+
+ static const int kProtoTransitionNumberOfEntriesOffset = 0;
+ STATIC_ASSERT(kProtoTransitionHeaderSize == 1);
+
// Conversion from transition number to array indices.
static int ToKeyIndex(int transition_number) {
return kFirstIndex +
@@ -205,20 +216,55 @@ class TransitionArray: public FixedArray {
kTransitionTarget;
}
- static Handle<TransitionArray> AllocateSimple(
- Isolate* isolate, Handle<Map> target);
+ // Returns the fixed array length required to hold number_of_transitions
+ // transitions.
+ static int LengthFor(int number_of_transitions) {
+ return ToKeyIndex(number_of_transitions);
+ }
+
+ // Allocates a TransitionArray.
+ static Handle<TransitionArray> Allocate(Isolate* isolate,
+ int number_of_transitions,
+ int slack = 0);
+
+ static void EnsureHasFullTransitionArray(Handle<Map> map);
+ static void ReplaceTransitions(Handle<Map> map, Object* new_transitions);
- // Allocate a new transition array with a single entry.
- static Handle<TransitionArray> NewWith(Handle<Map> map,
- Handle<Name> name,
- Handle<Map> target,
- SimpleTransitionFlag flag);
+ // Search a transition for a given kind, property name and attributes.
+ int Search(PropertyKind kind, Name* name, PropertyAttributes attributes,
+ int* out_insertion_index = NULL);
+ // Search a non-property transition (like elements kind, observe or frozen
+ // transitions).
+ inline int SearchSpecial(Symbol* symbol, int* out_insertion_index = NULL) {
+ return SearchName(symbol, out_insertion_index);
+ }
// Search a first transition for a given property name.
inline int SearchName(Name* name, int* out_insertion_index = NULL);
int SearchDetails(int transition, PropertyKind kind,
PropertyAttributes attributes, int* out_insertion_index);
+ int number_of_transitions() {
+ if (length() < kFirstIndex) return 0;
+ return Smi::cast(get(kTransitionLengthIndex))->value();
+ }
+
+ static inline PropertyDetails GetSimpleTargetDetails(Map* transition) {
+ return transition->GetLastDescriptorDetails();
+ }
+
+ static inline Name* GetSimpleTransitionKey(Map* transition) {
+ int descriptor = transition->LastAdded();
+ return transition->instance_descriptors()->GetKey(descriptor);
+ }
+
+ static void TraverseTransitionTreeInternal(Map* map,
+ TraverseCallback callback,
+ void* data);
+
+ static void SetPrototypeTransitions(Handle<Map> map,
+ Handle<FixedArray> proto_transitions);
+
// Compares two tuples <key, kind, attributes>, returns -1 if
// tuple1 is "less" than tuple2, 0 if tuple1 is equal to tuple2, and 1 otherwise.
static inline int CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
@@ -247,6 +293,12 @@ class TransitionArray: public FixedArray {
int origin_transition,
int target_transition);
+#ifdef DEBUG
+ static void CheckNewTransitionsAreConsistent(Handle<Map> map,
+ TransitionArray* old_transitions,
+ Object* transitions);
+#endif
+
DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray);
};
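Note on the new layout constants: with the back-pointer slot gone (the back pointer now lives on the target map), key/target pairs start at index 2. A worked sketch of the index arithmetic implied by kFirstIndex and kTransitionSize above:

    // Index arithmetic for the new full transition array layout:
    //   [0] prototype transitions, [1] length, [2..] key/target pairs.
    const int kFirstIndex = 2;
    const int kTransitionSize = 2;

    int ToKeyIndex(int transition_number) {
      return kFirstIndex + transition_number * kTransitionSize;
    }
    int ToTargetIndex(int transition_number) {
      return ToKeyIndex(transition_number) + 1;
    }
    // Transition #0 occupies slots [2] (key) and [3] (target);
    // transition #1 occupies slots [4] and [5], and so on.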
diff --git a/deps/v8/src/type-feedback-vector.cc b/deps/v8/src/type-feedback-vector.cc
index b0be315b2b..6653bea0ee 100644
--- a/deps/v8/src/type-feedback-vector.cc
+++ b/deps/v8/src/type-feedback-vector.cc
@@ -75,15 +75,22 @@ void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot, Code::Kind kind) {
}
+template Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
+ Isolate* isolate, const FeedbackVectorSpec* spec);
+template Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
+ Isolate* isolate, const ZoneFeedbackVectorSpec* spec);
+
+
// static
-Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
- Isolate* isolate, const FeedbackVectorSpec& spec) {
- const int slot_count = spec.slots();
- const int ic_slot_count = spec.ic_slots();
+template <typename Spec>
+Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
+ const Spec* spec) {
+ const int slot_count = spec->slots();
+ const int ic_slot_count = spec->ic_slots();
const int index_count =
FLAG_vector_ics ? VectorICComputer::word_count(ic_slot_count) : 0;
- const int length =
- slot_count + ic_slot_count + index_count + kReservedIndexCount;
+ const int length = slot_count + (ic_slot_count * elements_per_ic_slot()) +
+ index_count + kReservedIndexCount;
if (length == kReservedIndexCount) {
return Handle<TypeFeedbackVector>::cast(
isolate->factory()->empty_fixed_array());
@@ -113,7 +120,7 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
Handle<TypeFeedbackVector> vector = Handle<TypeFeedbackVector>::cast(array);
if (FLAG_vector_ics) {
for (int i = 0; i < ic_slot_count; i++) {
- vector->SetKind(FeedbackVectorICSlot(i), spec.GetKind(i));
+ vector->SetKind(FeedbackVectorICSlot(i), spec->GetKind(i));
}
}
return vector;
@@ -207,16 +214,28 @@ Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
}
-void FeedbackNexus::InstallHandlers(int start_index, MapHandleList* maps,
- CodeHandleList* handlers) {
+Handle<FixedArray> FeedbackNexus::EnsureExtraArrayOfSize(int length) {
Isolate* isolate = GetIsolate();
- Handle<FixedArray> array = handle(FixedArray::cast(GetFeedback()), isolate);
+ Handle<Object> feedback_extra = handle(GetFeedbackExtra(), isolate);
+ if (!feedback_extra->IsFixedArray() ||
+ FixedArray::cast(*feedback_extra)->length() != length) {
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+ SetFeedbackExtra(*array);
+ return array;
+ }
+ return Handle<FixedArray>::cast(feedback_extra);
+}
+
+
+void FeedbackNexus::InstallHandlers(Handle<FixedArray> array,
+ MapHandleList* maps,
+ CodeHandleList* handlers) {
int receiver_count = maps->length();
for (int current = 0; current < receiver_count; ++current) {
Handle<Map> map = maps->at(current);
Handle<WeakCell> cell = Map::WeakCellForMap(map);
- array->set(start_index + (current * 2), *cell);
- array->set(start_index + (current * 2 + 1), *handlers->at(current));
+ array->set(current * 2, *cell);
+ array->set(current * 2 + 1, *handlers->at(current));
}
}
@@ -224,6 +243,7 @@ void FeedbackNexus::InstallHandlers(int start_index, MapHandleList* maps,
InlineCacheState LoadICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
+
if (feedback == *vector()->UninitializedSentinel(isolate)) {
return UNINITIALIZED;
} else if (feedback == *vector()->MegamorphicSentinel(isolate)) {
@@ -233,10 +253,10 @@ InlineCacheState LoadICNexus::StateFromFeedback() const {
} else if (feedback->IsFixedArray()) {
// Determine state purely by our structure, don't check if the maps are
// cleared.
- FixedArray* array = FixedArray::cast(feedback);
- int length = array->length();
- DCHECK(length >= 2);
- return length == 2 ? MONOMORPHIC : POLYMORPHIC;
+ return POLYMORPHIC;
+ } else if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
}
return UNINITIALIZED;
@@ -246,6 +266,7 @@ InlineCacheState LoadICNexus::StateFromFeedback() const {
InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
+
if (feedback == *vector()->UninitializedSentinel(isolate)) {
return UNINITIALIZED;
} else if (feedback == *vector()->PremonomorphicSentinel(isolate)) {
@@ -255,10 +276,14 @@ InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
} else if (feedback->IsFixedArray()) {
// Determine state purely by our structure, don't check if the maps are
// cleared.
- FixedArray* array = FixedArray::cast(feedback);
- int length = array->length();
- DCHECK(length >= 3);
- return length == 3 ? MONOMORPHIC : POLYMORPHIC;
+ return POLYMORPHIC;
+ } else if (feedback->IsWeakCell()) {
+ // Don't check if the map is cleared.
+ return MONOMORPHIC;
+ } else if (feedback->IsName()) {
+ Object* extra = GetFeedbackExtra();
+ FixedArray* extra_array = FixedArray::cast(extra);
+ return extra_array->length() > 2 ? POLYMORPHIC : MONOMORPHIC;
}
return UNINITIALIZED;
@@ -268,6 +293,8 @@ InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
InlineCacheState CallICNexus::StateFromFeedback() const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
+ DCHECK(!FLAG_vector_ics ||
+ GetFeedbackExtra() == *vector()->UninitializedSentinel(isolate));
if (feedback == *vector()->MegamorphicSentinel(isolate)) {
return GENERIC;
@@ -311,56 +338,68 @@ void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
void KeyedLoadICNexus::ConfigureMegamorphic() {
- SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
+ Isolate* isolate = GetIsolate();
+ SetFeedback(*vector()->MegamorphicSentinel(isolate), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*vector()->UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
}
void LoadICNexus::ConfigureMegamorphic() {
SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
}
void LoadICNexus::ConfigurePremonomorphic() {
SetFeedback(*vector()->PremonomorphicSentinel(GetIsolate()),
SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
}
void KeyedLoadICNexus::ConfigurePremonomorphic() {
- SetFeedback(*vector()->PremonomorphicSentinel(GetIsolate()),
- SKIP_WRITE_BARRIER);
+ Isolate* isolate = GetIsolate();
+ SetFeedback(*vector()->PremonomorphicSentinel(isolate), SKIP_WRITE_BARRIER);
+ SetFeedbackExtra(*vector()->UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
}
void LoadICNexus::ConfigureMonomorphic(Handle<Map> receiver_map,
Handle<Code> handler) {
- Handle<FixedArray> array = EnsureArrayOfSize(2);
Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- array->set(0, *cell);
- array->set(1, *handler);
+ SetFeedback(*cell);
+ SetFeedbackExtra(*handler);
}
void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
Handle<Map> receiver_map,
Handle<Code> handler) {
- Handle<FixedArray> array = EnsureArrayOfSize(3);
+ Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
if (name.is_null()) {
- array->set(0, Smi::FromInt(0));
+ SetFeedback(*cell);
+ SetFeedbackExtra(*handler);
} else {
- array->set(0, *name);
+ SetFeedback(*name);
+ Handle<FixedArray> array = EnsureExtraArrayOfSize(2);
+ array->set(0, *cell);
+ array->set(1, *handler);
}
- Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
- array->set(1, *cell);
- array->set(2, *handler);
}
void LoadICNexus::ConfigurePolymorphic(MapHandleList* maps,
CodeHandleList* handlers) {
+ Isolate* isolate = GetIsolate();
int receiver_count = maps->length();
- EnsureArrayOfSize(receiver_count * 2);
- InstallHandlers(0, maps, handlers);
+ Handle<FixedArray> array = EnsureArrayOfSize(receiver_count * 2);
+ InstallHandlers(array, maps, handlers);
+ SetFeedbackExtra(*vector()->UninitializedSentinel(isolate),
+ SKIP_WRITE_BARRIER);
}
@@ -368,26 +407,35 @@ void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
MapHandleList* maps,
CodeHandleList* handlers) {
int receiver_count = maps->length();
- Handle<FixedArray> array = EnsureArrayOfSize(1 + receiver_count * 2);
+ DCHECK(receiver_count > 1);
+ Handle<FixedArray> array;
if (name.is_null()) {
- array->set(0, Smi::FromInt(0));
+ array = EnsureArrayOfSize(receiver_count * 2);
+ SetFeedbackExtra(*vector()->UninitializedSentinel(GetIsolate()),
+ SKIP_WRITE_BARRIER);
} else {
- array->set(0, *name);
+ SetFeedback(*name);
+ array = EnsureExtraArrayOfSize(receiver_count * 2);
}
- InstallHandlers(1, maps, handlers);
+
+ InstallHandlers(array, maps, handlers);
}
-int FeedbackNexus::ExtractMaps(int start_index, MapHandleList* maps) const {
+int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
- if (feedback->IsFixedArray()) {
+ if (feedback->IsFixedArray() || feedback->IsString()) {
int found = 0;
+ if (feedback->IsString()) {
+ feedback = GetFeedbackExtra();
+ }
FixedArray* array = FixedArray::cast(feedback);
// The array should be of the form [<optional name>], then
// [map, handler, map, handler, ... ]
- DCHECK(array->length() >= (2 + start_index));
- for (int i = start_index; i < array->length(); i += 2) {
+ DCHECK(array->length() >= 2);
+ for (int i = 0; i < array->length(); i += 2) {
+ DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
if (!cell->cleared()) {
Map* map = Map::cast(cell->value());
@@ -396,18 +444,28 @@ int FeedbackNexus::ExtractMaps(int start_index, MapHandleList* maps) const {
}
}
return found;
+ } else if (feedback->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(feedback);
+ if (!cell->cleared()) {
+ Map* map = Map::cast(cell->value());
+ maps->Add(handle(map, isolate));
+ return 1;
+ }
}
return 0;
}
-MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(int start_index,
- Handle<Map> map) const {
+MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(Handle<Map> map) const {
Object* feedback = GetFeedback();
- if (feedback->IsFixedArray()) {
+ if (feedback->IsFixedArray() || feedback->IsString()) {
+ if (feedback->IsString()) {
+ feedback = GetFeedbackExtra();
+ }
FixedArray* array = FixedArray::cast(feedback);
- for (int i = start_index; i < array->length(); i += 2) {
+ for (int i = 0; i < array->length(); i += 2) {
+ DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
if (!cell->cleared()) {
Map* array_map = Map::cast(cell->value());
@@ -418,23 +476,35 @@ MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(int start_index,
}
}
}
+ } else if (feedback->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(feedback);
+ if (!cell->cleared()) {
+ Map* cell_map = Map::cast(cell->value());
+ if (cell_map == *map) {
+ Code* code = Code::cast(GetFeedbackExtra());
+ DCHECK(code->kind() == Code::HANDLER);
+ return handle(code);
+ }
+ }
}
return MaybeHandle<Code>();
}
-bool FeedbackNexus::FindHandlers(int start_index, CodeHandleList* code_list,
- int length) const {
+bool FeedbackNexus::FindHandlers(CodeHandleList* code_list, int length) const {
Object* feedback = GetFeedback();
int count = 0;
- if (feedback->IsFixedArray()) {
+ if (feedback->IsFixedArray() || feedback->IsString()) {
+ if (feedback->IsString()) {
+ feedback = GetFeedbackExtra();
+ }
FixedArray* array = FixedArray::cast(feedback);
- // The array should be of the form [<optional name>], then
- // [map, handler, map, handler, ... ]. Be sure to skip handlers whose maps
- // have been cleared.
- DCHECK(array->length() >= (2 + start_index));
- for (int i = start_index; i < array->length(); i += 2) {
+ // The array should be of the form [map, handler, map, handler, ... ].
+ // Be sure to skip handlers whose maps have been cleared.
+ DCHECK(array->length() >= 2);
+ for (int i = 0; i < array->length(); i += 2) {
+ DCHECK(array->get(i)->IsWeakCell());
WeakCell* cell = WeakCell::cast(array->get(i));
if (!cell->cleared()) {
Code* code = Code::cast(array->get(i + 1));
@@ -443,16 +513,19 @@ bool FeedbackNexus::FindHandlers(int start_index, CodeHandleList* code_list,
count++;
}
}
+ } else if (feedback->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(feedback);
+ if (!cell->cleared()) {
+ Code* code = Code::cast(GetFeedbackExtra());
+ DCHECK(code->kind() == Code::HANDLER);
+ code_list->Add(handle(code));
+ count++;
+ }
}
return count == length;
}
-int LoadICNexus::ExtractMaps(MapHandleList* maps) const {
- return FeedbackNexus::ExtractMaps(0, maps);
-}
-
-
void LoadICNexus::Clear(Code* host) { LoadIC::Clear(GetIsolate(), host, this); }
@@ -461,39 +534,10 @@ void KeyedLoadICNexus::Clear(Code* host) {
}
-int KeyedLoadICNexus::ExtractMaps(MapHandleList* maps) const {
- return FeedbackNexus::ExtractMaps(1, maps);
-}
-
-
-MaybeHandle<Code> LoadICNexus::FindHandlerForMap(Handle<Map> map) const {
- return FeedbackNexus::FindHandlerForMap(0, map);
-}
-
-
-MaybeHandle<Code> KeyedLoadICNexus::FindHandlerForMap(Handle<Map> map) const {
- return FeedbackNexus::FindHandlerForMap(1, map);
-}
-
-
-bool LoadICNexus::FindHandlers(CodeHandleList* code_list, int length) const {
- return FeedbackNexus::FindHandlers(0, code_list, length);
-}
-
-
-bool KeyedLoadICNexus::FindHandlers(CodeHandleList* code_list,
- int length) const {
- return FeedbackNexus::FindHandlers(1, code_list, length);
-}
-
-
Name* KeyedLoadICNexus::FindFirstName() const {
Object* feedback = GetFeedback();
- if (feedback->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(feedback);
- DCHECK(array->length() >= 3);
- Object* name = array->get(0);
- if (name->IsName()) return Name::cast(name);
+ if (feedback->IsString()) {
+ return Name::cast(feedback);
}
return NULL;
}
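Note on the type-feedback-vector.cc changes above: the net effect is a new slot encoding. Monomorphic ICs now store a WeakCell for the map in the feedback slot and the handler in the adjacent "extra" slot; polymorphic ICs store a flat [cell, handler, cell, handler, ...] array (keyed loads with a name keep the name in the feedback slot and put that array in the extra slot). A hedged sketch of the resulting state classification, mirroring StateFromFeedback; the enums and function are illustrative, not V8's API:

    // Sketch of LoadIC state classification after this change.
    enum class Feedback { kUninitializedSentinel, kPremonomorphicSentinel,
                          kMegamorphicSentinel, kWeakCell, kFixedArray };
    enum class State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC,
                       POLYMORPHIC, MEGAMORPHIC };

    State Classify(Feedback f) {
      switch (f) {
        case Feedback::kUninitializedSentinel:  return State::UNINITIALIZED;
        case Feedback::kPremonomorphicSentinel: return State::PREMONOMORPHIC;
        case Feedback::kMegamorphicSentinel:    return State::MEGAMORPHIC;
        case Feedback::kWeakCell:    return State::MONOMORPHIC;  // map cell
        case Feedback::kFixedArray:  return State::POLYMORPHIC;  // pairs
      }
      return State::UNINITIALIZED;
    }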
diff --git a/deps/v8/src/type-feedback-vector.h b/deps/v8/src/type-feedback-vector.h
index b7abad51f1..c26a2d3e0d 100644
--- a/deps/v8/src/type-feedback-vector.h
+++ b/deps/v8/src/type-feedback-vector.h
@@ -12,18 +12,44 @@
#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/objects.h"
+#include "src/zone-containers.h"
namespace v8 {
namespace internal {
class FeedbackVectorSpec {
public:
- FeedbackVectorSpec() : slots_(0), ic_slots_(0) {}
- FeedbackVectorSpec(int slots, int ic_slots)
- : slots_(slots), ic_slots_(ic_slots) {
- if (FLAG_vector_ics) ic_slot_kinds_.resize(ic_slots);
+ FeedbackVectorSpec() : slots_(0), has_ic_slot_(false) {}
+ explicit FeedbackVectorSpec(int slots) : slots_(slots), has_ic_slot_(false) {}
+ FeedbackVectorSpec(int slots, Code::Kind ic_slot_kind)
+ : slots_(slots), has_ic_slot_(true), ic_kind_(ic_slot_kind) {}
+
+ int slots() const { return slots_; }
+
+ int ic_slots() const { return has_ic_slot_ ? 1 : 0; }
+
+ Code::Kind GetKind(int ic_slot) const {
+ DCHECK(FLAG_vector_ics && has_ic_slot_ && ic_slot == 0);
+ return ic_kind_;
}
+ private:
+ int slots_;
+ bool has_ic_slot_;
+ Code::Kind ic_kind_;
+};
+
+
+class ZoneFeedbackVectorSpec {
+ public:
+ explicit ZoneFeedbackVectorSpec(Zone* zone)
+ : slots_(0), ic_slots_(0), ic_slot_kinds_(zone) {}
+
+ ZoneFeedbackVectorSpec(Zone* zone, int slots, int ic_slots)
+ : slots_(slots),
+ ic_slots_(ic_slots),
+ ic_slot_kinds_(FLAG_vector_ics ? ic_slots : 0, zone) {}
+
int slots() const { return slots_; }
void increase_slots(int count) { slots_ += count; }
@@ -46,7 +72,7 @@ class FeedbackVectorSpec {
private:
int slots_;
int ic_slots_;
- std::vector<unsigned char> ic_slot_kinds_;
+ ZoneVector<unsigned char> ic_slot_kinds_;
};
@@ -74,6 +100,8 @@ class TypeFeedbackVector : public FixedArray {
static const int kWithTypesIndex = 1;
static const int kGenericCountIndex = 2;
+ static int elements_per_ic_slot() { return FLAG_vector_ics ? 2 : 1; }
+
int first_ic_slot_index() const {
DCHECK(length() >= kReservedIndexCount);
return Smi::cast(get(kFirstICSlotIndex))->value();
@@ -114,7 +142,7 @@ class TypeFeedbackVector : public FixedArray {
int ICSlots() const {
if (length() == 0) return 0;
- return length() - first_ic_slot_index();
+ return (length() - first_ic_slot_index()) / elements_per_ic_slot();
}
// Conversion from a slot or ic slot to an integer index to the underlying
@@ -127,7 +155,7 @@ class TypeFeedbackVector : public FixedArray {
int GetIndex(FeedbackVectorICSlot slot) const {
int first_ic_slot = first_ic_slot_index();
DCHECK(slot.ToInt() < ICSlots());
- return first_ic_slot + slot.ToInt();
+ return first_ic_slot + slot.ToInt() * elements_per_ic_slot();
}
// Conversion from an integer index to either a slot or an ic slot. The caller
@@ -140,7 +168,8 @@ class TypeFeedbackVector : public FixedArray {
FeedbackVectorICSlot ToICSlot(int index) const {
DCHECK(index >= first_ic_slot_index() && index < length());
- return FeedbackVectorICSlot(index - first_ic_slot_index());
+ int ic_slot = (index - first_ic_slot_index()) / elements_per_ic_slot();
+ return FeedbackVectorICSlot(ic_slot);
}
Object* Get(FeedbackVectorSlot slot) const { return get(GetIndex(slot)); }
@@ -158,8 +187,9 @@ class TypeFeedbackVector : public FixedArray {
// IC slots need metadata to recognize the type of IC.
Code::Kind GetKind(FeedbackVectorICSlot slot) const;
+ template <typename Spec>
static Handle<TypeFeedbackVector> Allocate(Isolate* isolate,
- const FeedbackVectorSpec& spec);
+ const Spec* spec);
static Handle<TypeFeedbackVector> Copy(Isolate* isolate,
Handle<TypeFeedbackVector> vector);
@@ -244,14 +274,17 @@ class FeedbackNexus {
void FindAllMaps(MapHandleList* maps) const { ExtractMaps(maps); }
virtual InlineCacheState StateFromFeedback() const = 0;
- virtual int ExtractMaps(MapHandleList* maps) const = 0;
- virtual MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const = 0;
- virtual bool FindHandlers(CodeHandleList* code_list, int length = -1) const {
- return length == 0;
- }
+ virtual int ExtractMaps(MapHandleList* maps) const;
+ virtual MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const;
+ virtual bool FindHandlers(CodeHandleList* code_list, int length = -1) const;
virtual Name* FindFirstName() const { return NULL; }
Object* GetFeedback() const { return vector()->Get(slot()); }
+ Object* GetFeedbackExtra() const {
+ DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
+ int extra_index = vector()->GetIndex(slot()) + 1;
+ return vector()->get(extra_index);
+ }
protected:
Isolate* GetIsolate() const { return vector()->GetIsolate(); }
@@ -261,13 +294,17 @@ class FeedbackNexus {
vector()->Set(slot(), feedback, mode);
}
+ void SetFeedbackExtra(Object* feedback_extra,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER) {
+ DCHECK(TypeFeedbackVector::elements_per_ic_slot() > 1);
+ int index = vector()->GetIndex(slot()) + 1;
+ vector()->set(index, feedback_extra, mode);
+ }
+
Handle<FixedArray> EnsureArrayOfSize(int length);
- void InstallHandlers(int start_index, MapHandleList* maps,
+ Handle<FixedArray> EnsureExtraArrayOfSize(int length);
+ void InstallHandlers(Handle<FixedArray> array, MapHandleList* maps,
CodeHandleList* handlers);
- int ExtractMaps(int start_index, MapHandleList* maps) const;
- MaybeHandle<Code> FindHandlerForMap(int start_index, Handle<Map> map) const;
- bool FindHandlers(int start_index, CodeHandleList* code_list,
- int length) const;
private:
// The reason for having a vector handle and a raw pointer is that we can and
@@ -334,10 +371,6 @@ class LoadICNexus : public FeedbackNexus {
void ConfigurePolymorphic(MapHandleList* maps, CodeHandleList* handlers);
InlineCacheState StateFromFeedback() const OVERRIDE;
- int ExtractMaps(MapHandleList* maps) const OVERRIDE;
- MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const OVERRIDE;
- virtual bool FindHandlers(CodeHandleList* code_list,
- int length = -1) const OVERRIDE;
};
@@ -364,10 +397,6 @@ class KeyedLoadICNexus : public FeedbackNexus {
CodeHandleList* handlers);
InlineCacheState StateFromFeedback() const OVERRIDE;
- int ExtractMaps(MapHandleList* maps) const OVERRIDE;
- MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const OVERRIDE;
- virtual bool FindHandlers(CodeHandleList* code_list,
- int length = -1) const OVERRIDE;
Name* FindFirstName() const OVERRIDE;
};
}
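
The hunks above change the IC-slot layout so that, with vector ICs enabled, each IC slot spans two array elements (the feedback value plus one extra element read by GetFeedbackExtra). A minimal standalone sketch of the resulting index arithmetic, with illustrative names rather than V8's actual classes:

#include <cassert>

// Each IC slot spans two elements when vector ICs are on (elements_per_ic_slot() == 2).
constexpr int kElementsPerIcSlot = 2;

int IcSlotToIndex(int first_ic_slot_index, int ic_slot) {
  return first_ic_slot_index + ic_slot * kElementsPerIcSlot;
}

int IndexToIcSlot(int first_ic_slot_index, int index) {
  return (index - first_ic_slot_index) / kElementsPerIcSlot;
}

int main() {
  int first_ic = 5;                              // first_ic_slot_index()
  assert(IcSlotToIndex(first_ic, 3) == 11);      // GetIndex(FeedbackVectorICSlot(3))
  assert(IndexToIcSlot(first_ic, 11) == 3);      // ToICSlot(11)
  assert(IcSlotToIndex(first_ic, 3) + 1 == 12);  // the extra element's index
}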
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 96bd0ed0e3..087e1db148 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -52,11 +52,7 @@ Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorSlot slot) {
DCHECK(slot.ToInt() >= 0 && slot.ToInt() < feedback_vector_->length());
Object* obj = feedback_vector_->Get(slot);
- if (!obj->IsJSFunction() ||
- !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
- return Handle<Object>(obj, isolate());
- }
- return Handle<Object>::cast(isolate()->factory()->undefined_value());
+ return Handle<Object>(obj, isolate());
}
@@ -74,10 +70,10 @@ Handle<Object> TypeFeedbackOracle::GetInfo(FeedbackVectorICSlot slot) {
obj = cell->value();
}
- if (!obj->IsJSFunction() ||
- !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
+ if (obj->IsJSFunction() || obj->IsAllocationSite() || obj->IsSymbol()) {
return Handle<Object>(obj, isolate());
}
+
return undefined;
}
@@ -245,12 +241,7 @@ void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
Handle<Map> map;
Map* raw_map = code->FindFirstMap();
- if (raw_map != NULL) {
- if (Map::TryUpdate(handle(raw_map)).ToHandle(&map) &&
- CanRetainOtherContext(*map, *native_context_)) {
- map = Handle<Map>::null();
- }
- }
+ if (raw_map != NULL) Map::TryUpdate(handle(raw_map)).ToHandle(&map);
if (code->is_compare_ic_stub()) {
CompareICStub stub(code->stub_key(), isolate());
@@ -279,7 +270,7 @@ void TypeFeedbackOracle::BinaryType(TypeFeedbackId id,
DCHECK(op < BinaryOpICState::FIRST_TOKEN ||
op > BinaryOpICState::LAST_TOKEN);
*left = *right = *result = Type::None(zone());
- *fixed_right_arg = Maybe<int>();
+ *fixed_right_arg = Nothing<int>();
*allocation_site = Handle<AllocationSite>::null();
return;
}
@@ -313,7 +304,7 @@ Type* TypeFeedbackOracle::CountType(TypeFeedbackId id) {
void TypeFeedbackOracle::PropertyReceiverTypes(TypeFeedbackId id,
- Handle<String> name,
+ Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
@@ -343,7 +334,7 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorICSlot slot,
- Handle<String> name,
+ Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
LoadICNexus nexus(feedback_vector_, slot);
@@ -363,8 +354,9 @@ void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
}
-void TypeFeedbackOracle::AssignmentReceiverTypes(
- TypeFeedbackId id, Handle<String> name, SmallMapList* receiver_types) {
+void TypeFeedbackOracle::AssignmentReceiverTypes(TypeFeedbackId id,
+ Handle<Name> name,
+ SmallMapList* receiver_types) {
receiver_types->Clear();
Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
CollectReceiverTypes(id, name, flags, receiver_types);
@@ -388,7 +380,7 @@ void TypeFeedbackOracle::CountReceiverTypes(TypeFeedbackId id,
void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
- Handle<String> name,
+ Handle<Name> name,
Code::Flags flags,
SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
@@ -401,7 +393,7 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
template <class T>
-void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<String> name,
+void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<Name> name,
Code::Flags flags,
SmallMapList* types) {
if (FLAG_collect_megamorphic_maps_from_stub_cache &&
@@ -415,43 +407,6 @@ void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<String> name,
}
-// Check if a map originates from a given native context. We use this
-// information to filter out maps from different context to avoid
-// retaining objects from different tabs in Chrome via optimized code.
-bool TypeFeedbackOracle::CanRetainOtherContext(Map* map,
- Context* native_context) {
- Object* constructor = NULL;
- while (!map->prototype()->IsNull()) {
- constructor = map->constructor();
- if (!constructor->IsNull()) {
- // If the constructor is not null or a JSFunction, we have to
- // conservatively assume that it may retain a native context.
- if (!constructor->IsJSFunction()) return true;
- // Check if the constructor directly references a foreign context.
- if (CanRetainOtherContext(JSFunction::cast(constructor),
- native_context)) {
- return true;
- }
- }
- map = HeapObject::cast(map->prototype())->map();
- }
- constructor = map->constructor();
- if (constructor->IsNull()) return false;
- // If the constructor is not null or a JSFunction, we have to conservatively
- // assume that it may retain a native context.
- if (!constructor->IsJSFunction()) return true;
- JSFunction* function = JSFunction::cast(constructor);
- return CanRetainOtherContext(function, native_context);
-}
-
-
-bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function,
- Context* native_context) {
- return function->context()->global_object() != native_context->global_object()
- && function->context()->global_object() != native_context->builtins();
-}
-
-
void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
SmallMapList* types) {
Handle<Object> object = GetInfo(ast_id);
@@ -475,8 +430,8 @@ void TypeFeedbackOracle::CollectReceiverTypes(T* obj, SmallMapList* types) {
types->Reserve(maps.length(), zone());
for (int i = 0; i < maps.length(); i++) {
Handle<Map> map(maps.at(i));
- if (!CanRetainOtherContext(*map, *native_context_)) {
- types->AddMapIfMissing(map, zone());
+ if (IsRelevantFeedback(*map, *native_context_)) {
+ types->AddMapIfMissing(maps.at(i), zone());
}
}
}
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 65af76865e..bd275e671a 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -42,9 +42,9 @@ class TypeFeedbackOracle: public ZoneObject {
IcCheckType* key_type);
void GetLoadKeyType(TypeFeedbackId id, IcCheckType* key_type);
- void PropertyReceiverTypes(TypeFeedbackId id, Handle<String> name,
+ void PropertyReceiverTypes(TypeFeedbackId id, Handle<Name> name,
SmallMapList* receiver_types);
- void PropertyReceiverTypes(FeedbackVectorICSlot slot, Handle<String> name,
+ void PropertyReceiverTypes(FeedbackVectorICSlot slot, Handle<Name> name,
SmallMapList* receiver_types);
void KeyedPropertyReceiverTypes(TypeFeedbackId id,
SmallMapList* receiver_types,
@@ -53,8 +53,7 @@ class TypeFeedbackOracle: public ZoneObject {
void KeyedPropertyReceiverTypes(FeedbackVectorICSlot slot,
SmallMapList* receiver_types, bool* is_string,
IcCheckType* key_type);
- void AssignmentReceiverTypes(TypeFeedbackId id,
- Handle<String> name,
+ void AssignmentReceiverTypes(TypeFeedbackId id, Handle<Name> name,
SmallMapList* receiver_types);
void KeyedAssignmentReceiverTypes(TypeFeedbackId id,
SmallMapList* receiver_types,
@@ -68,9 +67,12 @@ class TypeFeedbackOracle: public ZoneObject {
template <class T>
void CollectReceiverTypes(T* obj, SmallMapList* types);
- static bool CanRetainOtherContext(Map* map, Context* native_context);
- static bool CanRetainOtherContext(JSFunction* function,
- Context* native_context);
+ static bool IsRelevantFeedback(Map* map, Context* native_context) {
+ Object* constructor = map->GetConstructor();
+ return !constructor->IsJSFunction() ||
+ JSFunction::cast(constructor)->context()->native_context() ==
+ native_context;
+ }
Handle<JSFunction> GetCallTarget(FeedbackVectorICSlot slot);
Handle<AllocationSite> GetCallAllocationSite(FeedbackVectorICSlot slot);
@@ -104,12 +106,10 @@ class TypeFeedbackOracle: public ZoneObject {
Isolate* isolate() const { return isolate_; }
private:
- void CollectReceiverTypes(TypeFeedbackId id,
- Handle<String> name,
- Code::Flags flags,
- SmallMapList* types);
+ void CollectReceiverTypes(TypeFeedbackId id, Handle<Name> name,
+ Code::Flags flags, SmallMapList* types);
template <class T>
- void CollectReceiverTypes(T* obj, Handle<String> name, Code::Flags flags,
+ void CollectReceiverTypes(T* obj, Handle<Name> name, Code::Flags flags,
SmallMapList* types);
// Returns true if there is at least one string map and if
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index 4420bce4bc..caa428cc91 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -162,16 +162,16 @@ function NAMESubArray(begin, end) {
var srcLength = %_TypedArrayGetLength(this);
if (beginInt < 0) {
- beginInt = MathMax(0, srcLength + beginInt);
+ beginInt = $max(0, srcLength + beginInt);
} else {
- beginInt = MathMin(srcLength, beginInt);
+ beginInt = $min(srcLength, beginInt);
}
var endInt = IS_UNDEFINED(end) ? srcLength : end;
if (endInt < 0) {
- endInt = MathMax(0, srcLength + endInt);
+ endInt = $max(0, srcLength + endInt);
} else {
- endInt = MathMin(endInt, srcLength);
+ endInt = $min(endInt, srcLength);
}
if (endInt < beginInt) {
endInt = beginInt;
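
The $max/$min rename above does not change the clamping itself; NAMESubArray's index normalization, as a host-side sketch with hypothetical helper names:

#include <algorithm>
#include <cassert>

// Negative indices count back from the end; both ends clamp into [0, length].
int ClampSubarrayIndex(int relative, int length) {
  return relative < 0 ? std::max(0, length + relative)
                      : std::min(length, relative);
}

int main() {
  assert(ClampSubarrayIndex(-2, 10) == 8);   // begin = srcLength + begin
  assert(ClampSubarrayIndex(42, 10) == 10);  // clamped to srcLength
  assert(ClampSubarrayIndex(-42, 10) == 0);  // clamped to 0
}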
diff --git a/deps/v8/src/types.cc b/deps/v8/src/types.cc
index 37386cd82f..9116a693f0 100644
--- a/deps/v8/src/types.cc
+++ b/deps/v8/src/types.cc
@@ -267,7 +267,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
// Also, it doesn't apply elsewhere. 8-(
// We ought to find a cleaner solution for compiling stubs parameterised
// over type or class variables, esp ones with bounds...
- return kDetectable;
+ return kDetectable & kTaggedPointer;
case DECLARED_ACCESSOR_INFO_TYPE:
case EXECUTABLE_ACCESSOR_INFO_TYPE:
case SHARED_FUNCTION_INFO_TYPE:
@@ -777,7 +777,7 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
bits &= ~number_bits;
result->Set(0, BitsetType::New(bits, region));
}
- return NormalizeUnion(result, size);
+ return NormalizeUnion(result, size, region);
}
@@ -992,7 +992,7 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
size = AddToUnion(type1, result, size, region);
size = AddToUnion(type2, result, size, region);
- return NormalizeUnion(result, size);
+ return NormalizeUnion(result, size, region);
}
@@ -1016,9 +1016,9 @@ int TypeImpl<Config>::AddToUnion(
}
-template<class Config>
+template <class Config>
typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
- UnionHandle unioned, int size) {
+ UnionHandle unioned, int size, Region* region) {
DCHECK(size >= 1);
DCHECK(unioned->Get(0)->IsBitset());
// If the union has just one element, return it.
@@ -1032,8 +1032,11 @@ typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
if (representation == unioned->Get(1)->Representation()) {
return unioned->Get(1);
}
- // TODO(jarin) If the element at 1 is range of constant, slap
- // the representation on it and return that.
+ if (unioned->Get(1)->IsRange()) {
+ return RangeType::New(unioned->Get(1)->AsRange()->Min(),
+ unioned->Get(1)->AsRange()->Max(), unioned->Get(0),
+ region);
+ }
}
unioned->Shrink(size);
SLOW_DCHECK(unioned->Wellformed());
diff --git a/deps/v8/src/types.h b/deps/v8/src/types.h
index 0aae064171..1756229f97 100644
--- a/deps/v8/src/types.h
+++ b/deps/v8/src/types.h
@@ -615,7 +615,8 @@ class TypeImpl : public Config::Base {
TypeHandle type, UnionHandle result, int size, Region* region);
static int IntersectAux(TypeHandle type, TypeHandle other, UnionHandle result,
int size, Limits* limits, Region* region);
- static TypeHandle NormalizeUnion(UnionHandle unioned, int size);
+ static TypeHandle NormalizeUnion(UnionHandle unioned, int size,
+ Region* region);
static TypeHandle NormalizeRangeAndBitset(RangeHandle range, bitset* bits,
Region* region);
};
diff --git a/deps/v8/src/typing.cc b/deps/v8/src/typing.cc
index 48528705bf..1598c09b59 100644
--- a/deps/v8/src/typing.cc
+++ b/deps/v8/src/typing.cc
@@ -410,7 +410,12 @@ void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
if (!prop->is_computed_name() &&
prop->key()->AsLiteral()->value()->IsInternalizedString() &&
prop->emit_store()) {
- prop->RecordTypeFeedback(oracle());
+ // Record type feedback for the property.
+ TypeFeedbackId id = prop->key()->AsLiteral()->LiteralFeedbackId();
+ SmallMapList maps;
+ oracle()->CollectReceiverTypes(id, &maps);
+ prop->set_receiver_type(maps.length() == 1 ? maps.at(0)
+ : Handle<Map>::null());
}
}
@@ -562,7 +567,17 @@ void AstTyper::VisitCall(Call* expr) {
void AstTyper::VisitCallNew(CallNew* expr) {
// Collect type feedback.
- expr->RecordTypeFeedback(oracle());
+ FeedbackVectorSlot allocation_site_feedback_slot =
+ FLAG_pretenuring_call_new ? expr->AllocationSiteFeedbackSlot()
+ : expr->CallNewFeedbackSlot();
+ expr->set_allocation_site(
+ oracle()->GetCallNewAllocationSite(allocation_site_feedback_slot));
+ bool monomorphic =
+ oracle()->CallNewIsMonomorphic(expr->CallNewFeedbackSlot());
+ expr->set_is_monomorphic(monomorphic);
+ if (monomorphic) {
+ expr->set_target(oracle()->GetCallNewTarget(expr->CallNewFeedbackSlot()));
+ }
RECURSE(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
@@ -640,7 +655,7 @@ void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
Type* type;
Type* left_type;
Type* right_type;
- Maybe<int> fixed_right_arg;
+ Maybe<int> fixed_right_arg = Nothing<int>();
Handle<AllocationSite> allocation_site;
oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
&left_type, &right_type, &type, &fixed_right_arg,
@@ -784,7 +799,6 @@ void AstTyper::VisitModuleDeclaration(ModuleDeclaration* declaration) {
void AstTyper::VisitImportDeclaration(ImportDeclaration* declaration) {
- RECURSE(Visit(declaration->module()));
}
diff --git a/deps/v8/src/unique.h b/deps/v8/src/unique.h
index b56ee84a33..c04c966a4f 100644
--- a/deps/v8/src/unique.h
+++ b/deps/v8/src/unique.h
@@ -108,7 +108,12 @@ class Unique {
}
template <class S> static Unique<T> cast(Unique<S> that) {
- return Unique<T>(that.raw_address_, Handle<T>::cast(that.handle_));
+ // Allow fetching location() to unsafe-cast the handle. This is necessary
+ // since we can't concurrently safe-cast. Safe-casting requires looking at
+ // the heap which may be moving concurrently to the compiler thread.
+ AllowHandleDereference allow_deref;
+ return Unique<T>(that.raw_address_,
+ Handle<T>(reinterpret_cast<T**>(that.handle_.location())));
}
inline bool IsInitialized() const {
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 1c9e3a6824..5c7afbec20 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -208,7 +208,7 @@ class BitFieldBase {
static const U kNext = kShift + kSize;
// Value for the field with all bits set.
- static const T kMax = static_cast<T>((1U << size) - 1);
+ static const T kMax = static_cast<T>((kOne << size) - 1);
// Tells whether the provided value fits into the bit field.
static bool is_valid(T value) {
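
The kOne change above matters because shifting the 32-bit literal 1U by 32 or more bits is undefined behavior, so kMax came out wrong for bit fields of 64-bit types. A simplified sketch of the fixed computation (not the real BitFieldBase):

#include <cassert>
#include <cstdint>

template <class T, int size, class U = uint64_t>
struct BitField {
  static constexpr U kOne = 1;  // shift in the field's own (64-bit) type
  static constexpr T kMax = static_cast<T>((kOne << size) - 1);
};

int main() {
  // With the old `1U << size`, size == 40 would shift a 32-bit literal
  // by 40 bits (undefined); with kOne the full 64-bit mask is correct.
  assert(BitField<uint64_t, 40>::kMax == (uint64_t{1} << 40) - 1);
}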
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 495921eeb1..4c3c023af6 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -17,12 +17,12 @@
#include "src/hydrogen.h"
#include "src/isolate.h"
#include "src/lithium-allocator.h"
-#include "src/natives.h"
#include "src/objects.h"
#include "src/runtime-profiler.h"
#include "src/sampler.h"
-#include "src/serialize.h"
-#include "src/snapshot.h"
+#include "src/snapshot/natives.h"
+#include "src/snapshot/serialize.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 23143345f6..1ea3f412f8 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -9,9 +9,6 @@
// var $Number = global.Number;
// var $Function = global.Function;
// var $Array = global.Array;
-//
-// in math.js:
-// var $floor = MathFloor
var $isNaN = GlobalIsNaN;
var $isFinite = GlobalIsFinite;
@@ -35,6 +32,17 @@ function InstallFunctions(object, attributes, functions) {
}
+function OverrideFunction(object, name, f) {
+ ObjectDefineProperty(object, name, { value: f,
+ writable: true,
+ configurable: true,
+ enumerable: false });
+ %FunctionSetName(f, name);
+ %FunctionRemovePrototype(f);
+ %SetNativeFlag(f);
+}
+
+
// Helper function to install a getter-only accessor property.
function InstallGetter(object, name, getter) {
%FunctionSetName(getter, name);
@@ -210,7 +218,7 @@ var DefaultObjectToString = NoSideEffectsObjectToString;
function NoSideEffectsObjectToString() {
if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]";
if (IS_NULL(this)) return "[object Null]";
- return "[object " + %_ClassOf(ToObject(this)) + "]";
+ return "[object " + %_ClassOf(TO_OBJECT_INLINE(this)) + "]";
}
@@ -223,7 +231,7 @@ function ObjectToLocaleString() {
// ECMA-262 - 15.2.4.4
function ObjectValueOf() {
- return ToObject(this);
+ return TO_OBJECT_INLINE(this);
}
@@ -258,7 +266,7 @@ function ObjectPropertyIsEnumerable(V) {
var desc = GetOwnPropertyJS(this, P);
return IS_UNDEFINED(desc) ? false : desc.isEnumerable();
}
- return %IsPropertyEnumerable(ToObject(this), P);
+ return %IsPropertyEnumerable(TO_OBJECT_INLINE(this), P);
}
@@ -276,7 +284,7 @@ function ObjectDefineGetter(name, fun) {
desc.setGet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(ToObject(receiver), ToName(name), desc, false);
+ DefineOwnProperty(TO_OBJECT_INLINE(receiver), ToName(name), desc, false);
}
@@ -285,7 +293,7 @@ function ObjectLookupGetter(name) {
if (receiver == null && !IS_UNDETECTABLE(receiver)) {
receiver = %GlobalProxy(global);
}
- return %LookupAccessor(ToObject(receiver), ToName(name), GETTER);
+ return %LookupAccessor(TO_OBJECT_INLINE(receiver), ToName(name), GETTER);
}
@@ -302,7 +310,7 @@ function ObjectDefineSetter(name, fun) {
desc.setSet(fun);
desc.setEnumerable(true);
desc.setConfigurable(true);
- DefineOwnProperty(ToObject(receiver), ToName(name), desc, false);
+ DefineOwnProperty(TO_OBJECT_INLINE(receiver), ToName(name), desc, false);
}
@@ -311,12 +319,12 @@ function ObjectLookupSetter(name) {
if (receiver == null && !IS_UNDETECTABLE(receiver)) {
receiver = %GlobalProxy(global);
}
- return %LookupAccessor(ToObject(receiver), ToName(name), SETTER);
+ return %LookupAccessor(TO_OBJECT_INLINE(receiver), ToName(name), SETTER);
}
function ObjectKeys(obj) {
- obj = ToObject(obj);
+ obj = TO_OBJECT_INLINE(obj);
if (%_IsJSProxy(obj)) {
var handler = %GetHandler(obj);
var names = CallTrap0(handler, "keys", DerivedKeysTrap);
@@ -633,7 +641,7 @@ function GetOwnPropertyJS(obj, v) {
// GetOwnProperty returns an array indexed by the constants
// defined in macros.py.
// If p is not a property on obj undefined is returned.
- var props = %GetOwnProperty(ToObject(obj), p);
+ var props = %GetOwnProperty(TO_OBJECT_INLINE(obj), p);
return ConvertDescriptorArrayToDescriptor(props);
}
@@ -684,9 +692,9 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// ES5 8.12.9.
function DefineObjectProperty(obj, p, desc, should_throw) {
- var current_array = %GetOwnProperty(ToObject(obj), ToName(p));
+ var current_array = %GetOwnProperty(obj, ToName(p));
var current = ConvertDescriptorArrayToDescriptor(current_array);
- var extensible = %IsExtensible(ToObject(obj));
+ var extensible = %IsExtensible(obj);
// Error handling according to spec.
// Step 3
@@ -1097,7 +1105,7 @@ function ObjectGetOwnPropertyKeys(obj, filter) {
// ES5 section 15.2.3.4.
function ObjectGetOwnPropertyNames(obj) {
- obj = ToObject(obj);
+ obj = TO_OBJECT_INLINE(obj);
// Special handling for proxies.
if (%_IsJSProxy(obj)) {
var handler = %GetHandler(obj);
@@ -1189,7 +1197,7 @@ function ObjectDefineProperties(obj, properties) {
if (!IS_SPEC_OBJECT(obj)) {
throw MakeTypeError("called_on_non_object", ["Object.defineProperties"]);
}
- var props = ToObject(properties);
+ var props = TO_OBJECT_INLINE(properties);
var names = GetOwnEnumerablePropertyNames(props);
var descriptors = new InternalArray();
for (var i = 0; i < names.length; i++) {
@@ -1368,7 +1376,7 @@ function ObjectIs(obj1, obj2) {
// ECMA-262, Edition 6, section B.2.2.1.1
function ObjectGetProto() {
- return %_GetPrototype(ToObject(this));
+ return %_GetPrototype(TO_OBJECT_INLINE(this));
}
@@ -1385,10 +1393,10 @@ function ObjectSetProto(proto) {
function ObjectConstructor(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
- return ToObject(x);
+ return TO_OBJECT_INLINE(x);
} else {
if (x == null) return { };
- return ToObject(x);
+ return TO_OBJECT_INLINE(x);
}
}
@@ -1514,7 +1522,7 @@ function NumberConstructor(x) {
// ECMA-262 section 15.7.4.2.
-function NumberToString(radix) {
+function NumberToStringJS(radix) {
// NOTE: Both Number objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
var number = this;
@@ -1542,7 +1550,7 @@ function NumberToString(radix) {
// ECMA-262 section 15.7.4.3
function NumberToLocaleString() {
- return %_CallFunction(this, NumberToString);
+ return %_CallFunction(this, NumberToStringJS);
}
@@ -1655,8 +1663,7 @@ function NumberIsNaN(number) {
function NumberIsSafeInteger(number) {
if (NumberIsFinite(number)) {
var integral = TO_INTEGER(number);
- if (integral == number)
- return MathAbs(integral) <= $Number.MAX_SAFE_INTEGER;
+ if (integral == number) return $abs(integral) <= $Number.MAX_SAFE_INTEGER;
}
return false;
}
@@ -1695,7 +1702,7 @@ function SetUpNumber() {
// Set up non-enumerable functions on the Number prototype object.
InstallFunctions($Number.prototype, DONT_ENUM, $Array(
- "toString", NumberToString,
+ "toString", NumberToStringJS,
"toLocaleString", NumberToLocaleString,
"valueOf", NumberValueOf,
"toFixed", NumberToFixedJS,
@@ -1836,7 +1843,7 @@ function NewFunctionFromString(arguments, function_token) {
// If the formal parameters string include ) - an illegal
// character - it may make the combined function expression
// compile. We avoid this problem by checking for this early on.
- if (%_CallFunction(p, ')', StringIndexOfJS) != -1) {
+ if (%_CallFunction(p, ')', $stringIndexOf) != -1) {
throw MakeSyntaxError('paren_in_arg_string', []);
}
// If the formal parameters include an unbalanced block comment, the
@@ -1879,21 +1886,11 @@ SetUpFunction();
// ----------------------------------------------------------------------------
// Iterator related spec functions.
-// ES6 rev 26, 2014-07-18
-// 7.4.1 CheckIterable ( obj )
-function ToIterable(obj) {
- if (!IS_SPEC_OBJECT(obj)) {
- return UNDEFINED;
- }
- return obj[symbolIterator];
-}
-
-
-// ES6 rev 26, 2014-07-18
-// 7.4.2 GetIterator ( obj, method )
+// ES6 rev 33, 2015-02-12
+// 7.4.1 GetIterator ( obj, method )
function GetIterator(obj, method) {
if (IS_UNDEFINED(method)) {
- method = ToIterable(obj);
+ method = obj[symbolIterator];
}
if (!IS_SPEC_FUNCTION(method)) {
throw MakeTypeError('not_iterable', [obj]);
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 2351e529a2..c0b8bd7843 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -20,6 +20,7 @@ const char* Variable::Mode2String(VariableMode mode) {
case CONST_LEGACY: return "CONST_LEGACY";
case LET: return "LET";
case CONST: return "CONST";
+ case IMPORT: return "IMPORT";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
@@ -32,8 +33,7 @@ const char* Variable::Mode2String(VariableMode mode) {
Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
- bool is_valid_ref, Kind kind,
- InitializationFlag initialization_flag,
+ Kind kind, InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag)
: scope_(scope),
name_(name),
@@ -42,8 +42,10 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
location_(UNALLOCATED),
index_(-1),
initializer_position_(RelocInfo::kNoPosition),
+ has_strong_mode_reference_(false),
+ strong_mode_reference_start_position_(RelocInfo::kNoPosition),
+ strong_mode_reference_end_position_(RelocInfo::kNoPosition),
local_if_not_shadowed_(NULL),
- is_valid_ref_(is_valid_ref),
force_context_allocation_(false),
is_used_(false),
initialization_flag_(initialization_flag),
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index 1adeb1f0f4..545c3bd1f5 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -18,7 +18,7 @@ namespace internal {
class Variable: public ZoneObject {
public:
- enum Kind { NORMAL, THIS, NEW_TARGET, ARGUMENTS };
+ enum Kind { NORMAL, FUNCTION, THIS, NEW_TARGET, ARGUMENTS };
enum Location {
// Before and during variable allocation, a variable whose location is
@@ -47,15 +47,13 @@ class Variable: public ZoneObject {
LOOKUP
};
- Variable(Scope* scope, const AstRawString* name, VariableMode mode,
- bool is_valid_ref, Kind kind, InitializationFlag initialization_flag,
+ Variable(Scope* scope, const AstRawString* name, VariableMode mode, Kind kind,
+ InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
// Printing support
static const char* Mode2String(VariableMode mode);
- bool IsValidReference() { return is_valid_ref_; }
-
// The source code for an eval() call may refer to a variable that is
// in an outer scope about which we don't know anything (it may not
// be the script scope). scope() is NULL in that case. Currently the
@@ -98,6 +96,7 @@ class Variable: public ZoneObject {
return initialization_flag_ == kNeedsInitialization;
}
+ bool is_function() const { return kind_ == FUNCTION; }
bool is_this() const { return kind_ == THIS; }
bool is_new_target() const { return kind_ == NEW_TARGET; }
bool is_arguments() const { return kind_ == ARGUMENTS; }
@@ -129,6 +128,25 @@ class Variable: public ZoneObject {
static int CompareIndex(Variable* const* v, Variable* const* w);
+ void RecordStrongModeReference(int start_position, int end_position) {
+ // Record the earliest reference to the variable. Used in error messages for
+ // strong mode references to undeclared variables.
+ if (has_strong_mode_reference_ &&
+ strong_mode_reference_start_position_ < start_position)
+ return;
+ has_strong_mode_reference_ = true;
+ strong_mode_reference_start_position_ = start_position;
+ strong_mode_reference_end_position_ = end_position;
+ }
+
+ bool has_strong_mode_reference() const { return has_strong_mode_reference_; }
+ int strong_mode_reference_start_position() const {
+ return strong_mode_reference_start_position_;
+ }
+ int strong_mode_reference_end_position() const {
+ return strong_mode_reference_end_position_;
+ }
+
private:
Scope* scope_;
const AstRawString* name_;
@@ -137,6 +155,11 @@ class Variable: public ZoneObject {
Location location_;
int index_;
int initializer_position_;
+ // Tracks whether the variable is bound to a VariableProxy which is in strong
+ // mode, and if yes, the source location of the reference.
+ bool has_strong_mode_reference_;
+ int strong_mode_reference_start_position_;
+ int strong_mode_reference_end_position_;
// If this field is set, this variable references the stored locally bound
// variable, but it might be shadowed by variable bindings introduced by
@@ -144,9 +167,6 @@ class Variable: public ZoneObject {
// binding scope (exclusive).
Variable* local_if_not_shadowed_;
- // Valid as a reference? (const and this are not valid, for example)
- bool is_valid_ref_;
-
// Usage info.
bool force_context_allocation_; // set by variable resolver
bool is_used_;
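
The strong-mode bookkeeping added above keeps only the earliest reference to the variable, for use in error messages. The policy in isolation, as an illustrative sketch:

#include <cassert>

// Keep the first (earliest) recorded source range; ignore later ones.
struct StrongModeRef {
  bool has_ref = false;
  int start = -1;
  int end = -1;
  void Record(int s, int e) {
    if (has_ref && start < s) return;  // an earlier reference is already recorded
    has_ref = true;
    start = s;
    end = e;
  }
};

int main() {
  StrongModeRef ref;
  ref.Record(30, 35);
  ref.Record(10, 15);  // earlier: replaces the previous record
  ref.Record(50, 55);  // later: ignored
  assert(ref.has_ref && ref.start == 10 && ref.end == 15);
}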
diff --git a/deps/v8/src/weak-collection.js b/deps/v8/src/weak-collection.js
index a44c3d7cd7..776043d9dc 100644
--- a/deps/v8/src/weak-collection.js
+++ b/deps/v8/src/weak-collection.js
@@ -20,30 +20,19 @@ function WeakMapConstructor(iterable) {
throw MakeTypeError('constructor_not_function', ['WeakMap']);
}
- var iter, adder;
+ %WeakCollectionInitialize(this);
if (!IS_NULL_OR_UNDEFINED(iterable)) {
- iter = GetIterator(ToObject(iterable));
- adder = this.set;
+ var adder = this.set;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['set', this]);
}
- }
-
- %WeakCollectionInitialize(this);
-
- if (IS_UNDEFINED(iter)) return;
-
- var next, done, nextItem;
- while (!(next = iter.next()).done) {
- if (!IS_SPEC_OBJECT(next)) {
- throw MakeTypeError('iterator_result_not_an_object', [next]);
- }
- nextItem = next.value;
- if (!IS_SPEC_OBJECT(nextItem)) {
- throw MakeTypeError('iterator_value_not_an_object', [nextItem]);
+ for (var nextItem of iterable) {
+ if (!IS_SPEC_OBJECT(nextItem)) {
+ throw MakeTypeError('iterator_value_not_an_object', [nextItem]);
+ }
+ %_CallFunction(this, nextItem[0], nextItem[1], adder);
}
- %_CallFunction(this, nextItem[0], nextItem[1], adder);
}
}
@@ -53,9 +42,7 @@ function WeakMapGet(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.get', this]);
}
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
+ if (!IS_SPEC_OBJECT(key)) return UNDEFINED;
return %WeakCollectionGet(this, key);
}
@@ -65,7 +52,7 @@ function WeakMapSet(key, value) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.set', this]);
}
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+ if (!IS_SPEC_OBJECT(key)) {
throw %MakeTypeError('invalid_weakmap_key', [this, key]);
}
return %WeakCollectionSet(this, key, value);
@@ -77,9 +64,7 @@ function WeakMapHas(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.has', this]);
}
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
+ if (!IS_SPEC_OBJECT(key)) return false;
return %WeakCollectionHas(this, key);
}
@@ -89,9 +74,7 @@ function WeakMapDelete(key) {
throw MakeTypeError('incompatible_method_receiver',
['WeakMap.prototype.delete', this]);
}
- if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
- throw %MakeTypeError('invalid_weakmap_key', [this, key]);
- }
+ if (!IS_SPEC_OBJECT(key)) return false;
return %WeakCollectionDelete(this, key);
}
@@ -127,26 +110,16 @@ function WeakSetConstructor(iterable) {
throw MakeTypeError('constructor_not_function', ['WeakSet']);
}
- var iter, adder;
+ %WeakCollectionInitialize(this);
if (!IS_NULL_OR_UNDEFINED(iterable)) {
- iter = GetIterator(ToObject(iterable));
- adder = this.add;
+ var adder = this.add;
if (!IS_SPEC_FUNCTION(adder)) {
throw MakeTypeError('property_not_function', ['add', this]);
}
- }
-
- %WeakCollectionInitialize(this);
-
- if (IS_UNDEFINED(iter)) return;
-
- var next, done;
- while (!(next = iter.next()).done) {
- if (!IS_SPEC_OBJECT(next)) {
- throw MakeTypeError('iterator_result_not_an_object', [next]);
+ for (var value of iterable) {
+ %_CallFunction(this, value, adder);
}
- %_CallFunction(this, next.value, adder);
}
}
@@ -156,7 +129,7 @@ function WeakSetAdd(value) {
throw MakeTypeError('incompatible_method_receiver',
['WeakSet.prototype.add', this]);
}
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+ if (!IS_SPEC_OBJECT(value)) {
throw %MakeTypeError('invalid_weakset_value', [this, value]);
}
return %WeakCollectionSet(this, value, true);
@@ -168,9 +141,7 @@ function WeakSetHas(value) {
throw MakeTypeError('incompatible_method_receiver',
['WeakSet.prototype.has', this]);
}
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
+ if (!IS_SPEC_OBJECT(value)) return false;
return %WeakCollectionHas(this, value);
}
@@ -180,9 +151,7 @@ function WeakSetDelete(value) {
throw MakeTypeError('incompatible_method_receiver',
['WeakSet.prototype.delete', this]);
}
- if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
- throw %MakeTypeError('invalid_weakset_value', [this, value]);
- }
+ if (!IS_SPEC_OBJECT(value)) return false;
return %WeakCollectionDelete(this, value);
}
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 64c71cfaee..df0c7d28eb 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -265,6 +265,12 @@ void Assembler::set_target_address_at(Address pc,
}
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ Memory::Address_at(pc) = target;
+}
+
+
Address Assembler::target_address_from_return_address(Address pc) {
return pc - kCallTargetAddressOffset;
}
@@ -366,12 +372,24 @@ Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
}
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
+Address RelocInfo::target_internal_reference() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return Memory::Address_at(pc_);
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
void RelocInfo::set_target_object(Object* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
@@ -438,7 +456,8 @@ void RelocInfo::set_target_cell(Cell* cell,
void RelocInfo::WipeOut() {
- if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
@@ -542,7 +561,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -569,7 +589,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index dcfa01fd0e..abdf7a5d9c 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -108,60 +108,16 @@ void CpuFeatures::PrintFeatures() {
// -----------------------------------------------------------------------------
-// Implementation of RelocInfo
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard int3 instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- int code_size = Assembler::kCallSequenceLength + guard_bytes;
-
- // Create a code patcher.
- CodePatcher patcher(pc_, code_size);
-
- // Add a label for checking the size of the code used for returning.
-#ifdef DEBUG
- Label check_codesize;
- patcher.masm()->bind(&check_codesize);
-#endif
-
- // Patch the code.
- patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
- Assembler::RelocInfoNone());
- patcher.masm()->call(kScratchRegister);
-
- // Check that the size of the code generated is as expected.
- DCHECK_EQ(Assembler::kCallSequenceLength,
- patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
-
- // Add the requested number of int3 instructions after the call.
- for (int i = 0; i < guard_bytes; i++) {
- patcher.masm()->int3();
- }
-}
-
-
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count);
-}
-
-
-// -----------------------------------------------------------------------------
// Register constants.
const int
Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = {
- // rax, rbx, rdx, rcx, rsi, rdi, r8, r9, r11, r14, r15
- 0, 3, 2, 1, 6, 7, 8, 9, 11, 14, 15
+ // rax, rbx, rdx, rcx, rsi, rdi, r8, r9, r11, r12, r14, r15
+ 0, 3, 2, 1, 6, 7, 8, 9, 11, 12, 14, 15
};
const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
- 0, 3, 2, 1, -1, -1, 4, 5, 6, 7, -1, 8, -1, -1, 9, 10
+ 0, 3, 2, 1, -1, -1, 4, 5, 6, 7, -1, 8, 9, -1, 10, 11
};
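
With r12 no longer reserved as the smi constant register, it joins the allocatable set, and the two tables above must remain inverse mappings (with -1 marking non-allocatable codes). A quick standalone check of that invariant:

#include <cassert>

int main() {
  // register code -> allocation index (-1 = not allocatable)
  const int kIndexByCode[16] = {0, 3, 2,  1, -1, -1, 4, 5,
                                6, 7, -1, 8,  9, -1, 10, 11};
  // allocation index -> register code (12 entries, now including r12)
  const int kCodeByIndex[12] = {0, 3, 2, 1, 6, 7, 8, 9, 11, 12, 14, 15};
  for (int i = 0; i < 12; i++) {
    assert(kIndexByCode[kCodeByIndex[i]] == i);  // tables round-trip
  }
}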
@@ -786,6 +742,15 @@ void Assembler::bsrl(Register dst, Register src) {
}
+void Assembler::bsrl(Register dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0xBD);
+ emit_operand(dst, src);
+}
+
+
void Assembler::call(Label* L) {
positions_recorder()->WriteRecordedPositions();
EnsureSpace ensure_space(this);
@@ -2573,6 +2538,16 @@ void Assembler::movd(XMMRegister dst, Register src) {
}
+void Assembler::movd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x6E);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::movd(Register dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0x66);
@@ -2676,6 +2651,45 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
}
+void Assembler::pextrd(Register dst, XMMRegister src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(src, dst);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x16);
+ emit_sse_operand(src, dst);
+ emit(imm8);
+}
+
+
+void Assembler::pinsrd(XMMRegister dst, Register src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x22);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
+void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+ DCHECK(IsEnabled(SSE4_1));
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x3A);
+ emit(0x22);
+ emit_sse_operand(dst, src);
+ emit(imm8);
+}
+
+
void Assembler::movsd(const Operand& dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2); // double
@@ -3246,8 +3260,7 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::roundsd(XMMRegister dst, XMMRegister src,
- Assembler::RoundingMode mode) {
+void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
DCHECK(IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
emit(0x66);
@@ -3290,6 +3303,66 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x62);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::punpckhdq(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0x66);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x6A);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::maxsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minsd(XMMRegister dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::minsd(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x5D);
+ emit_sse_operand(dst, src);
+}
+
+
// AVX instructions
void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2) {
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 116816c872..cfb89c8cc9 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -40,7 +40,7 @@
#include <deque>
#include "src/assembler.h"
-#include "src/serialize.h"
+#include "src/compiler.h"
namespace v8 {
namespace internal {
@@ -74,9 +74,8 @@ struct Register {
// rsp - stack pointer
// rbp - frame pointer
// r10 - fixed scratch register
- // r12 - smi constant register
// r13 - root register
- static const int kMaxNumAllocatableRegisters = 11;
+ static const int kMaxNumAllocatableRegisters = 12;
static int NumAllocatableRegisters() {
return kMaxNumAllocatableRegisters;
}
@@ -104,6 +103,7 @@ struct Register {
"r8",
"r9",
"r11",
+ "r12",
"r14",
"r15"
};
@@ -359,6 +359,14 @@ inline Condition CommuteCondition(Condition cc) {
}
+enum RoundingMode {
+ kRoundToNearest = 0x0,
+ kRoundDown = 0x1,
+ kRoundUp = 0x2,
+ kRoundToZero = 0x3
+};
+
+
// -----------------------------------------------------------------------------
// Machine instruction Immediates
@@ -561,6 +569,11 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, code, target);
}
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
static inline RelocInfo::Mode RelocInfoNone() {
if (kPointerSize == kInt64Size) {
return RelocInfo::NONE64;
@@ -886,6 +899,7 @@ class Assembler : public AssemblerBase {
void bt(const Operand& dst, Register src);
void bts(const Operand& dst, Register src);
void bsrl(Register dst, Register src);
+ void bsrl(Register dst, const Operand& src);
// Miscellaneous
void clc();
@@ -1063,6 +1077,7 @@ class Assembler : public AssemblerBase {
// SSE2 instructions
void movd(XMMRegister dst, Register src);
+ void movd(XMMRegister dst, const Operand& src);
void movd(Register dst, XMMRegister src);
void movq(XMMRegister dst, Register src);
void movq(Register dst, XMMRegister src);
@@ -1131,15 +1146,21 @@ class Assembler : public AssemblerBase {
void movmskpd(Register dst, XMMRegister src);
+ void punpckldq(XMMRegister dst, XMMRegister src);
+ void punpckhdq(XMMRegister dst, XMMRegister src);
+
+ void maxsd(XMMRegister dst, XMMRegister src);
+ void maxsd(XMMRegister dst, const Operand& src);
+ void minsd(XMMRegister dst, XMMRegister src);
+ void minsd(XMMRegister dst, const Operand& src);
+
// SSE 4.1 instruction
void extractps(Register dst, XMMRegister src, byte imm8);
- enum RoundingMode {
- kRoundToNearest = 0x0,
- kRoundDown = 0x1,
- kRoundUp = 0x2,
- kRoundToZero = 0x3
- };
+ void pextrd(Register dst, XMMRegister src, int8_t imm8);
+
+ void pinsrd(XMMRegister dst, Register src, int8_t imm8);
+ void pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
@@ -1318,6 +1339,18 @@ class Assembler : public AssemblerBase {
void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
vsd(0x5e, dst, src1, src2);
}
+ void vmaxsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(0x5f, dst, src1, src2);
+ }
+ void vmaxsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5f, dst, src1, src2);
+ }
+ void vminsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+ vsd(0x5d, dst, src1, src2);
+ }
+ void vminsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+ vsd(0x5d, dst, src1, src2);
+ }
void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
@@ -1341,7 +1374,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
+ void RecordDeoptReason(const int reason, const SourcePosition position);
// Allocate a constant pool of the correct size for the generated code.
Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index f43084b13f..3141c17919 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -1051,7 +1051,87 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+ const int calleeOffset) {
+ // rax : the number of items to be pushed to the stack
+ //
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
+ __ movp(rcx, rsp);
+ // Make rcx the space we have left. The stack might already be overflowed
+ // here which will cause rcx to become negative.
+ __ subp(rcx, kScratchRegister);
+ // Make rdx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
+ // Check if the arguments will overflow the stack.
+ __ cmpp(rcx, rdx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ Push(Operand(rbp, calleeOffset));
+ __ Push(rax);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+
+ __ bind(&okay);
+}
+
+
+static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int argumentsOffset,
+ const int indexOffset,
+ const int limitOffset) {
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ __ movp(key, Operand(rbp, indexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movp(receiver, Operand(rbp, argumentsOffset)); // load arguments
+
+ // Use inline caching to speed up access to arguments.
+ if (FLAG_vector_ics) {
+ // TODO(mvstanton): Vector-based ics need additional infrastructure to
+ // be embedded here. For now, just call the runtime.
+ __ Push(receiver);
+ __ Push(key);
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ } else {
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+ }
+
+ // Push the nth argument.
+ __ Push(rax);
+
+ // Update the index on the stack and in register key.
+ __ movp(key, Operand(rbp, indexOffset));
+ __ SmiAddConstant(key, key, Smi::FromInt(1));
+ __ movp(Operand(rbp, indexOffset), key);
+
+ __ bind(&entry);
+ __ cmpp(key, Operand(rbp, limitOffset));
+ __ j(not_equal, &loop);
+
+ // On exit, the pushed arguments count is in rax, untagged
+ __ SmiToInteger64(rax, key);
+}
+
+
+// Used by FunctionApply and ReflectApply
+static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
+ const int kFormalParameters = targetIsArgument ? 3 : 2;
+ const int kStackSize = kFormalParameters + 1;
+
// Stack at entry:
// rsp : return address
// rsp[8] : arguments
@@ -1071,30 +1151,13 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ Push(Operand(rbp, kFunctionOffset));
__ Push(Operand(rbp, kArgumentsOffset));
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- __ LoadRoot(kScratchRegister, Heap::kRealStackLimitRootIndex);
- __ movp(rcx, rsp);
- // Make rcx the space we have left. The stack might already be overflowed
- // here which will cause rcx to become negative.
- __ subp(rcx, kScratchRegister);
- // Make rdx the space we need for the array when it is unrolled onto the
- // stack.
- __ PositiveSmiTimesPowerOfTwoToInteger64(rdx, rax, kPointerSizeLog2);
- // Check if the arguments will overflow the stack.
- __ cmpp(rcx, rdx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ Push(Operand(rbp, kFunctionOffset));
- __ Push(rax);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
+ if (targetIsArgument) {
+ __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ } else {
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ }
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current index and limit.
const int kLimitOffset =
@@ -1156,54 +1219,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&push_receiver);
__ Push(rbx);
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- __ movp(key, Operand(rbp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ movp(receiver, Operand(rbp, kArgumentsOffset)); // load arguments
-
- // Use inline caching to speed up access to arguments.
- if (FLAG_vector_ics) {
- // TODO(mvstanton): Vector-based ics need additional infrastructure to
- // be embedded here. For now, just call the runtime.
- __ Push(receiver);
- __ Push(key);
- __ CallRuntime(Runtime::kGetProperty, 2);
- } else {
- Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
- __ Call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
- }
-
- // Push the nth argument.
- __ Push(rax);
-
- // Update the index on the stack and in register key.
- __ movp(key, Operand(rbp, kIndexOffset));
- __ SmiAddConstant(key, key, Smi::FromInt(1));
- __ movp(Operand(rbp, kIndexOffset), key);
-
- __ bind(&entry);
- __ cmpp(key, Operand(rbp, kLimitOffset));
- __ j(not_equal, &loop);
+ // Loop over the arguments array, pushing each value to the stack
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
ParameterCount actual(rax);
- __ SmiToInteger32(rax, key);
__ movp(rdi, Operand(rbp, kFunctionOffset));
__ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
__ j(not_equal, &call_proxy);
__ InvokeFunction(rdi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
// Call the function proxy.
__ bind(&call_proxy);
@@ -1216,7 +1245,92 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Leave internal frame.
}
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
+}
+
+
+// Used by ReflectConstruct
+static void Generate_ConstructHelper(MacroAssembler* masm) {
+ const int kFormalParameters = 3;
+ const int kStackSize = kFormalParameters + 1;
+
+ // Stack at entry:
+ // rsp : return address
+ // rsp[8] : original constructor (new.target)
+ // rsp[16] : arguments
+ // rsp[24] : constructor
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Stack frame:
+ // rbp : Old base pointer
+ // rbp[8] : return address
+ // rbp[16] : original constructor (new.target)
+ // rbp[24] : arguments
+ // rbp[32] : constructor
+ static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
+ static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+
+ // If newTarget is not supplied, set it to constructor
+ Label validate_arguments;
+ __ movp(rax, Operand(rbp, kNewTargetOffset));
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &validate_arguments, Label::kNear);
+ __ movp(rax, Operand(rbp, kFunctionOffset));
+ __ movp(Operand(rbp, kNewTargetOffset), rax);
+
+ // Validate arguments
+ __ bind(&validate_arguments);
+ __ Push(Operand(rbp, kFunctionOffset));
+ __ Push(Operand(rbp, kArgumentsOffset));
+ __ Push(Operand(rbp, kNewTargetOffset));
+ __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ Push(rax); // limit
+ __ Push(Immediate(0)); // index
+ // Push newTarget and callee functions
+ __ Push(Operand(rbp, kNewTargetOffset));
+ __ Push(Operand(rbp, kFunctionOffset));
+
+ // Loop over the arguments array, pushing each value to the stack
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+
+ // Use undefined feedback vector
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ __ movp(rdi, Operand(rbp, kFunctionOffset));
+
+ // Call the function.
+ CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ // Leave internal frame.
+ }
+ // remove this, target, arguments and newTarget
+ __ ret(kStackSize * kPointerSize);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, false);
+}
+
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, true);
+}
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ Generate_ConstructHelper(masm);
}
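
The shared Generate_CheckStackOverflow used by both helpers above reduces to pointer arithmetic against the "real" stack limit. As host-side pseudocode of the emitted check (illustrative, not the assembly itself):

#include <cstdint>

// The stack grows down, so free space = rsp - real_stack_limit. The
// generated code compares signed (j greater) because an already
// overflowed stack makes the subtraction come out negative.
bool ArgumentsFitOnStack(uintptr_t rsp, uintptr_t real_stack_limit,
                         int64_t arg_count, int pointer_size_log2 = 3) {
  int64_t space_left = static_cast<int64_t>(rsp - real_stack_limit);  // rcx
  int64_t space_needed = arg_count << pointer_size_log2;              // rdx
  return space_left > space_needed;
}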
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 2a28767010..6ca81fe4b4 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -11,6 +11,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -929,7 +930,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ ret(0);
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -961,10 +962,17 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ movp(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
+ // If the constructor was [[Call]]ed, the call will not push a new.target
+ // onto the stack. In that case the arguments array we construct is bogus,
+ // but we do not care as the constructor throws immediately.
+ __ Cmp(rcx, Smi::FromInt(0));
+ Label skip_decrement;
+ __ j(equal, &skip_decrement);
// Subtract 1 from smi-tagged arguments count.
__ SmiToInteger32(rcx, rcx);
__ decl(rcx);
__ Integer32ToSmi(rcx, rcx);
+ __ bind(&skip_decrement);
}
__ movp(args.GetArgumentOperand(2), rcx);
__ SmiToInteger64(rcx, rcx);
@@ -1048,7 +1056,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1429,19 +1437,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ cmpp(rax, rdx);
__ j(equal, &runtime);
- __ movp(pending_exception_operand, rdx);
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- Label termination_exception;
- __ j(equal, &termination_exception, Label::kNear);
- __ Throw(rax);
-
- __ bind(&termination_exception);
- __ ThrowUncatchable(rax);
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -2427,14 +2429,13 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ CompareRoot(rax, Heap::kExceptionRootIndex);
__ j(equal, &exception_returned);
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
-
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
Label okay;
__ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
__ cmpp(r14, pending_exception_operand);
@@ -2450,26 +2451,47 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
- // Retrieve the pending exception.
- Operand pending_exception_operand =
- masm->ExternalOperand(pending_exception_address);
- __ movp(rax, pending_exception_operand);
-
- // Clear the pending exception.
- __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ movp(pending_exception_operand, rdx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- Label throw_termination_exception;
- __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
- __ j(equal, &throw_termination_exception);
-
- // Handle normal exception.
- __ Throw(rax);
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set rax to
+ // contain the current pending exception; don't clobber it.
+ ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ movp(arg_reg_1, Immediate(0)); // argc.
+ __ movp(arg_reg_2, Immediate(0)); // argv.
+ __ Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
+ __ PrepareCallCFunction(3);
+ __ CallCFunction(find_handler, 3);
+ }
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(rax);
+ // Retrieve the handler context, SP and FP.
+ __ movp(rsi, masm->ExternalOperand(pending_handler_context_address));
+ __ movp(rsp, masm->ExternalOperand(pending_handler_sp_address));
+ __ movp(rbp, masm->ExternalOperand(pending_handler_fp_address));
+
+ // If the handler is a JS frame, restore the context to the frame. Note
+ // that rsi holds 0 for non-JS handler frames, so the store is skipped.
+ Label skip;
+ __ testp(rsi, rsi);
+ __ j(zero, &skip, Label::kNear);
+ __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
+ __ bind(&skip);
+
+ // Compute the handler entry address and jump to it.
+ __ movp(rdi, masm->ExternalOperand(pending_handler_code_address));
+ __ movp(rdx, masm->ExternalOperand(pending_handler_offset_address));
+ __ leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
+ __ jmp(rdi);
}
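
To make the new dispatch concrete, here is a hedged C++ sketch of the data flow: the runtime call fills five per-isolate slots, and the stub reloads them and jumps. The struct, field names, and header size below are illustrative stand-ins, not V8's; only the kHeapObjectTag untagging mirrors FieldOperand:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

struct PendingHandler {
  uintptr_t context;  // restored into rsi; 0 for non-JS handler frames
  uintptr_t code;     // tagged pointer to the handler's Code object
  uintptr_t offset;   // handler offset within that code
  uintptr_t fp, sp;   // frame/stack pointers to restore
};

constexpr uintptr_t kCodeHeaderSize = 0x40;  // placeholder for Code::kHeaderSize
constexpr uintptr_t kHeapObjectTag = 1;      // FieldOperand untags the pointer

// Mirrors leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize)).
static uintptr_t HandlerEntry(const PendingHandler& h) {
  return h.code - kHeapObjectTag + kCodeHeaderSize + h.offset;
}

int main() {
  PendingHandler h{0, 0x1001, 0x20, 0, 0};
  std::printf("entry = 0x%" PRIxPTR "\n", HandlerEntry(h));
  return 0;
}
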
@@ -2521,7 +2543,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Set up the roots and smi constant registers.
// Needs to be done before any further smi loads.
- __ InitializeSmiConstantRegister();
__ InitializeRootRegister();
}
@@ -2559,10 +2580,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ LoadRoot(rax, Heap::kExceptionRootIndex);
__ jmp(&exit);
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushStackHandler();
// Clear any pending exceptions.
__ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
@@ -2588,7 +2608,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ call(kScratchRegister);
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
@@ -2884,7 +2904,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
+ MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@@ -2897,6 +2917,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Push(VectorLoadICDescriptor::VectorRegister());
+ __ Push(VectorLoadICDescriptor::SlotRegister());
+ }
__ Push(object_);
__ Push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
@@ -2912,6 +2936,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ movp(index_, rax);
}
__ Pop(object_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ Pop(VectorLoadICDescriptor::SlotRegister());
+ __ Pop(VectorLoadICDescriptor::VectorRegister());
+ }
// Reload the instance type.
__ movp(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
@@ -3214,7 +3242,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// rax: string
@@ -3451,7 +3479,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@@ -3749,7 +3777,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ bind(&miss);
@@ -4316,15 +4344,228 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawLoadStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawKeyedLoadStub stub(isolate());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register feedback, Register scratch1,
+ Register scratch2, Register scratch3,
+ Register scratch4, bool is_polymorphic,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ Register receiver_map = scratch1;
+ Register counter = scratch2;
+ Register length = scratch3;
+ Register cached_map = scratch4;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ movp(receiver_map, FieldOperand(receiver, 0));
+ __ bind(&compare_map);
+ __ movp(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+ __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &start_polymorphic);
+
+ // Found the map: now call the handler.
+ Register handler = feedback;
+ __ movp(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ // Polymorphic: loop over the (map, handler) pairs from index 2 to N.
+ __ bind(&start_polymorphic);
+ __ SmiToInteger32(length, FieldOperand(feedback, FixedArray::kLengthOffset));
+ if (!is_polymorphic) {
+ // If the IC could be monomorphic we have to make sure we don't go past the
+ // end of the feedback array.
+ __ cmpl(length, Immediate(2));
+ __ j(equal, miss);
+ }
+ __ movl(counter, Immediate(2));
+
+ __ bind(&next_loop);
+ __ movp(cached_map, FieldOperand(feedback, counter, times_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmpp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &prepare_next);
+ __ movp(handler, FieldOperand(feedback, counter, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ __ bind(&prepare_next);
+ __ addl(counter, Immediate(2));
+ __ cmpl(counter, length);
+ __ j(less, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
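+ // Smis have no map pointer; the heap number map stands in for them.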
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
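
The loop above is a linear scan over (map, handler) pairs stored flat in the feedback FixedArray. A self-contained C++ sketch of that scan, with illustrative types:

#include <cassert>
#include <cstdint>
#include <vector>

// slots = [map0, handler0, map1, handler1, ...], stored flat as in the
// feedback FixedArray. Indices 0/1 are the monomorphic fast path checked
// before the loop, so the scan starts at raw index 2 and steps by 2,
// mirroring `addl(counter, Immediate(2))` above.
static intptr_t FindHandler(const std::vector<intptr_t>& slots,
                            intptr_t receiver_map) {
  for (size_t i = 2; i + 1 < slots.size(); i += 2) {
    if (slots[i] == receiver_map) return slots[i + 1];
  }
  return 0;  // exhausted the array: take the miss path
}

int main() {
  std::vector<intptr_t> slots = {10, 100, 20, 200, 30, 300};
  assert(FindHandler(slots, 20) == 200);
  assert(FindHandler(slots, 10) == 0);  // slot 0 is the fast path's job
  assert(FindHandler(slots, 99) == 0);  // miss
  return 0;
}
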
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register weak_cell, Register integer_slot,
+ Label* miss) {
+ // weak_cell initially holds the feedback slot's WeakCell
+ Label compare_smi_map;
+
+ // Move the weak map into the weak_cell register.
+ Register ic_map = weak_cell;
+ __ movp(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ cmpp(ic_map, FieldOperand(receiver, 0));
+ __ j(not_equal, miss);
+ Register handler = weak_cell;
+ __ movp(handler, FieldOperand(vector, integer_slot, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ __ bind(&compare_smi_map);
+ __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, miss);
+ __ movp(handler, FieldOperand(vector, integer_slot, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ leap(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+}
+
+
+void VectorRawLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // rdx
+ Register name = VectorLoadICDescriptor::NameRegister(); // rcx
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // rbx
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // rax
+ Register feedback = rdi;
+ Register integer_slot = r8;
+
+ __ SmiToInteger32(integer_slot, slot);
+ __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicCase(masm, receiver, name, vector, slot, feedback,
+ integer_slot, &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandleArrayCases(masm, receiver, name, vector, slot, feedback, integer_slot,
+ r9, r11, r15, true, &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &miss);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::LOAD_IC, code_flags, false, receiver, name, feedback, no_reg);
+
+ __ bind(&miss);
+ LoadIC::GenerateMiss(masm);
+}
+
+
+void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // rdx
+ Register key = VectorLoadICDescriptor::NameRegister(); // rcx
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // rbx
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // rax
+ Register feedback = rdi;
+ Register integer_slot = r8;
+
+ __ SmiToInteger32(integer_slot, slot);
+ __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback,
+ integer_slot, &miss);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, integer_slot,
+ r9, r11, r15, true, &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmpp(key, feedback);
+ __ j(not_equal, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ movp(feedback, FieldOperand(vector, integer_slot, times_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, integer_slot,
+ r9, r11, r15, false, &miss);
+
+ __ bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
}
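
Taken together, the keyed stub dispatches on what the feedback slot holds, in a fixed order. A hedged C++ sketch of that order; the enum and strings are illustrative only:

#include <cstdio>

// Illustrative classification of what the feedback slot may contain.
enum class FeedbackKind { kWeakCell, kFixedArray, kMegamorphicSymbol, kName };

static const char* Dispatch(FeedbackKind kind, bool key_matches_name) {
  switch (kind) {
    case FeedbackKind::kWeakCell:
      return "monomorphic: compare map, jump to handler";
    case FeedbackKind::kFixedArray:
      return "polymorphic: scan (map, handler) pairs";
    case FeedbackKind::kMegamorphicSymbol:
      return "generic: tail-call the megamorphic stub";
    case FeedbackKind::kName:
      // A property name in the slot means the map/handler array lives in
      // the *next* slot, guarded by an identity check against the key.
      return key_matches_name ? "poly-name: scan the next slot" : "miss";
  }
  return "miss";
}

int main() {
  std::puts(Dispatch(FeedbackKind::kName, true));
  return 0;
}
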
@@ -4761,7 +5002,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Operand* context_restore_operand) {
Label prologue;
Label promote_scheduled_exception;
- Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
Label write_back;
@@ -4838,13 +5078,22 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ movp(Operand(base_reg, kNextOffset), prev_next_address_reg);
__ cmpp(prev_limit_reg, Operand(base_reg, kLimitOffset));
__ j(not_equal, &delete_allocated_handles);
+
+ // Leave the API exit frame.
__ bind(&leave_exit_frame);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ movp(rsi, *context_restore_operand);
+ }
+ if (stack_space_operand != nullptr) {
+ __ movp(rbx, *stack_space_operand);
+ }
+ __ LeaveApiExitFrame(!restore_context);
// Check if the function scheduled an exception.
- __ Move(rsi, scheduled_exception_address);
- __ Cmp(Operand(rsi, 0), factory->the_hole_value());
+ __ Move(rdi, scheduled_exception_address);
+ __ Cmp(Operand(rdi, 0), factory->the_hole_value());
__ j(not_equal, &promote_scheduled_exception);
- __ bind(&exception_handled);
#if DEBUG
// Check if the function returned a valid JavaScript value.
@@ -4881,14 +5130,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&ok);
#endif
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ movp(rsi, *context_restore_operand);
- }
- if (stack_space_operand != nullptr) {
- __ movp(rbx, *stack_space_operand);
- }
- __ LeaveApiExitFrame(!restore_context);
if (stack_space_operand != nullptr) {
DCHECK_EQ(stack_space, 0);
__ PopReturnAddressTo(rcx);
@@ -4898,12 +5139,9 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ ret(stack_space * kPointerSize);
}
+ // Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kPromoteScheduledException, 0);
- }
- __ jmp(&exception_handled);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index c8f9456f75..ee2b5c526a 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -14,58 +14,57 @@
namespace v8 {
namespace internal {
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void PatchCodeWithCall(Address pc, Address target, int guard_bytes) {
+ int code_size = Assembler::kCallSequenceLength + guard_bytes;
+ // Create a code patcher.
+ CodePatcher patcher(pc, code_size);
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
-// for the precise return instructions sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- DCHECK(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
- rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
-}
+// Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+#endif
+ // Patch the code.
+ patcher.masm()->movp(kScratchRegister, reinterpret_cast<void*>(target),
+ Assembler::RelocInfoNone());
+ patcher.masm()->call(kScratchRegister);
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
+ // Check that the size of the code generated is as expected.
+ DCHECK_EQ(Assembler::kCallSequenceLength,
+ patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+ // Add the requested number of int3 instructions after the call.
+ for (int i = 0; i < guard_bytes; i++) {
+ patcher.masm()->int3();
+ }
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
+ CpuFeatures::FlushICache(pc, code_size);
}
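
For reference, a byte-level sketch of what the patcher emits, assuming x64's kScratchRegister is r10 and the standard encodings (49 BA imm64 for movabs r10, 41 FF D2 for call r10, CC for int3); the 10 + 3 bytes should match Assembler::kCallSequenceLength:

#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> MakeCallPatch(uint64_t target, int guard_bytes) {
  std::vector<uint8_t> code = {0x49, 0xBA};        // movabs r10, imm64
  for (int i = 0; i < 8; ++i) {
    code.push_back(static_cast<uint8_t>(target >> (8 * i)));
  }
  code.insert(code.end(), {0x41, 0xFF, 0xD2});     // call r10
  code.insert(code.end(), guard_bytes, 0xCC);      // trailing int3 guards
  return code;  // the real patcher then flushes the instruction cache
}

int main() {
  std::vector<uint8_t> patch = MakeCallPatch(0x12345678u, 2);
  std::printf("%zu bytes\n", patch.size());  // 13 + 2
  return 0;
}
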
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
+// Patch the JS frame exit code with a debug break call. See
+// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
+// for the precise return instructions sequence.
+void BreakLocation::SetDebugBreakAtReturn() {
+ DCHECK(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
+ PatchCodeWithCall(
+ pc(), debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
+ Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
}
-void BreakLocationIterator::SetDebugBreakAtSlot() {
+void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(),
+ PatchCodeWithCall(
+ pc(), debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
}
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-
#define __ ACCESS_MASM(masm)
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 2fc61814c8..02e9d2e66f 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -134,7 +134,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
#define __ masm()->
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Save all general purpose registers before messing with them.
@@ -307,7 +307,6 @@ void Deoptimizer::EntryGenerator::Generate() {
// Set up the roots register.
__ InitializeRootRegister();
- __ InitializeSmiConstantRegister();
// Return to the continuation point.
__ ret(0);
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index bed99d101a..4f5e74fa83 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -956,11 +956,21 @@ int DisassemblerX64::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5d:
+ AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5e:
AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5f:
+ AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ break;
default:
UnimplementedInstruction();
}
@@ -1179,6 +1189,19 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
AppendToBuffer(",%d", (*current) & 3);
current += 1;
+ } else if (third_byte == 0x16) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pextrd "); // reg/m32, xmm, imm8
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
+ current += 1;
+ } else if (third_byte == 0x22) {
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pinsrd "); // xmm, reg/m32, imm8
+ AppendToBuffer("%s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ AppendToBuffer(",%d", (*current) & 3);
+ current += 1;
} else {
UnimplementedInstruction();
}
@@ -1229,12 +1252,12 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += PrintRightXMMOperand(current);
} else if (opcode == 0x72) {
current += 1;
- AppendToBuffer("%s,%s,%d", (regop == 6) ? "pslld" : "psrld",
+ AppendToBuffer("%s %s,%d", (regop == 6) ? "pslld" : "psrld",
NameOfXMMRegister(rm), *current & 0x7f);
current += 1;
} else if (opcode == 0x73) {
current += 1;
- AppendToBuffer("%s,%s,%d", (regop == 6) ? "psllq" : "psrlq",
+ AppendToBuffer("%s %s,%d", (regop == 6) ? "psllq" : "psrlq",
NameOfXMMRegister(rm), *current & 0x7f);
current += 1;
} else {
@@ -1251,6 +1274,10 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "comisd";
} else if (opcode == 0x76) {
mnemonic = "pcmpeqd";
+ } else if (opcode == 0x62) {
+ mnemonic = "punpckldq";
+ } else if (opcode == 0x6A) {
+ mnemonic = "punpckhdq";
} else {
UnimplementedInstruction();
}
@@ -1526,10 +1553,14 @@ const char* DisassemblerX64::TwoByteMnemonic(byte opcode) {
return "mulsd";
case 0x5A: // F2 prefix.
return "cvtsd2ss";
+ case 0x5D: // F2 prefix.
+ return "minsd";
case 0x5C: // F2 prefix.
return "subsd";
case 0x5E: // F2 prefix.
return "divsd";
+ case 0x5F: // F2 prefix.
+ return "maxsd";
case 0xA2:
return "cpuid";
case 0xA5:
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 8813030284..1f8612afae 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -107,15 +107,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-inline void StackHandler::SetFp(Address slot, Address fp) {
- if (kFPOnStackSize == 2 * kPointerSize) {
- // Zero out the high-32 bit of FP for x32 port.
- Memory::Address_at(slot + kPointerSize) = 0;
- }
- Memory::Address_at(slot) = fp;
-}
-
-
} } // namespace v8::internal
#endif // V8_X64_FRAMES_X64_H_
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 9d70abfbf1..5861327bb5 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -95,7 +95,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
@@ -187,7 +188,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
- if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ if (info->scope()->is_script_scope()) {
__ Push(rdi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
@@ -232,6 +233,11 @@ void FullCodeGenerator::Generate() {
}
}
+ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@@ -240,6 +246,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
+ if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
+ --num_parameters;
+ ++rest_index;
+ }
+
__ leap(rdx,
Operand(rbp, StandardFrameConstants::kCallerSPOffset + offset));
__ Push(rdx);
@@ -275,10 +286,6 @@ void FullCodeGenerator::Generate() {
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adapter frame.
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
ArgumentsAccessStub::Type type;
if (is_strict(language_mode()) || !is_simple_parameter_list()) {
type = ArgumentsAccessStub::NEW_STRICT;
@@ -1489,7 +1496,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ Move(VectorLoadICDescriptor::SlotRegister(),
SmiFromSlot(proxy->VariableFeedbackSlot()));
}
- CallLoadIC(CONTEXTUAL);
+ CallGlobalLoadIC(var->name());
context()->Plug(rax);
break;
}
@@ -2132,7 +2139,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
__ Push(load_name);
__ Push(Operand(rsp, 2 * kPointerSize)); // iter
@@ -2144,16 +2150,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ Pop(rax); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
+ EnterTryBlock(expr->index(), &l_catch);
+ const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ Push(rax); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
+ const int generator_object_depth = kPointerSize + try_block_size;
__ movp(rax, Operand(rsp, generator_object_depth));
__ Push(rax); // g
+ __ Push(Smi::FromInt(expr->index())); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
Smi::FromInt(l_continuation.pos()));
@@ -2161,13 +2168,13 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ movp(rcx, rsi);
__ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ movp(context_register(),
Operand(rbp, StandardFrameConstants::kContextOffset));
__ Pop(rax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in rax
- __ PopTryHandler();
+ ExitTryBlock(expr->index());
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
@@ -2478,6 +2485,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ Push(Operand(rsp, 0)); // prototype
}
EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+ // The static prototype property is read-only. We handle the non-computed
+ // property name case in the parser. Since this is the only case where we
+ // need to check for an own read-only property, we special-case it here so
+ // the check does not have to run for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ Push(rax);
+ }
+
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@@ -2618,25 +2635,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ movp(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ Push(rax);
- __ Push(rsi);
- __ Push(var->name());
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, rcx);
- __ movp(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ j(not_equal, &skip);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2651,6 +2649,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
+ } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ // Assignment to const variable needs a write barrier.
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &const_error, Label::kNear);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2672,8 +2684,33 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(var->mode() == CONST_LEGACY);
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ Push(rax);
+ __ Push(rsi);
+ __ Push(var->name());
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, rcx);
+ __ movp(rdx, location);
+ __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+ __ j(not_equal, &skip);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ }
+ // Silently ignore store in sloppy mode.
}
}
@@ -3143,8 +3180,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- if (!ValidateSuperCall(expr)) return;
-
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(result_register(), new_target_var);
__ Push(result_register());
@@ -3649,8 +3684,8 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ movp(rax, FieldOperand(rax, Map::kConstructorOffset));
- __ CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
+ __ GetMapConstructor(rax, rax, rbx);
+ __ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
__ j(not_equal, &non_function_constructor);
// rax now contains the constructor function. Grab the
@@ -3950,7 +3985,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -3998,7 +4033,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4177,7 +4212,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// Call runtime to perform the lookup.
__ Push(cache);
__ Push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ bind(&done);
context()->Plug(rax);
@@ -4519,18 +4554,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- if (expr->function() != NULL &&
- expr->function()->intrinsic_type == Runtime::INLINE) {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as receiver.
__ movp(rax, GlobalObjectOperand());
__ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
@@ -4566,14 +4594,27 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
context()->DropAndPlug(1, rax);
} else {
- // Push the arguments ("left-to-right").
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- // Call the C runtime.
- __ CallRuntime(expr->function(), arg_count);
- context()->Plug(rax);
+ // Call the C runtime.
+ __ CallRuntime(function, arg_count);
+ context()->Plug(rax);
+ }
+ }
}
}
@@ -5221,17 +5262,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_pending_message_obj(isolate());
__ Load(rdx, pending_message_obj);
__ Push(rdx);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Load(rdx, has_pending_message);
- __ Integer32ToSmi(rdx, rdx);
- __ Push(rdx);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Load(rdx, pending_message_script);
- __ Push(rdx);
}
@@ -5240,17 +5270,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(rcx));
// Restore pending message from stack.
__ Pop(rdx);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ Store(pending_message_script, rdx);
-
- __ Pop(rdx);
- __ SmiToInteger32(rdx, rdx);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ Store(has_pending_message, rdx);
-
- __ Pop(rdx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ Store(pending_message_obj, rdx);
@@ -5269,34 +5288,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ movp(rsi, Operand(rsp, StackHandlerConstants::kContextOffset));
- __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
- }
- __ PopTryHandler();
- __ call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-
-#undef __
-
static const byte kJnsInstruction = 0x79;
static const byte kNopByteOne = 0x66;
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index 1ca0c85877..0cef86e102 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -229,6 +229,12 @@ void InternalArrayConstructorDescriptor::Initialize(
}
+void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {rsi, rdx, rax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {rsi, rax};
data->Initialize(arraysize(registers), registers, NULL);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index f4d75775bb..bfbadebf2d 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -9,6 +9,7 @@
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
@@ -128,7 +129,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions need to replace the receiver with the global proxy
// when called as functions (without an explicit receiver object).
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
+ if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
@@ -296,10 +297,10 @@ void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
bool LCodeGen::GenerateJumpTable() {
+ if (jump_table_.length() == 0) return !is_aborted();
+
Label needs_frame;
- if (jump_table_.length() > 0) {
- Comment(";;; -------------------- Jump table --------------------");
- }
+ Comment(";;; -------------------- Jump table --------------------");
for (int i = 0; i < jump_table_.length(); i++) {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
@@ -308,23 +309,7 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
- if (needs_frame.is_bound()) {
- __ jmp(&needs_frame);
- } else {
- __ bind(&needs_frame);
- __ movp(rsi, MemOperand(rbp, StandardFrameConstants::kContextOffset));
- __ pushq(rbp);
- __ movp(rbp, rsp);
- __ Push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ Push(rsi);
- __ movp(rsi, MemOperand(rsp, kPointerSize));
- __ call(kScratchRegister);
- }
+ __ call(&needs_frame);
} else {
if (info()->saves_caller_doubles()) {
DCHECK(info()->IsStub());
@@ -332,7 +317,58 @@ bool LCodeGen::GenerateJumpTable() {
}
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
+ }
+
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+ /* stack layout
+ 4: return address <-- rsp
+ 3: garbage
+ 2: garbage
+ 1: garbage
+ 0: garbage
+ */
+ // Reserve space for context and stub marker.
+ __ subp(rsp, Immediate(2 * kPointerSize));
+ __ Push(MemOperand(rsp, 2 * kPointerSize)); // Copy return address.
+ __ Push(kScratchRegister); // Save entry address for ret(0)
+
+ /* stack layout
+ 4: return address
+ 3: garbage
+ 2: garbage
+ 1: return address
+ 0: entry address <-- rsp
+ */
+
+ // Remember context pointer.
+ __ movp(kScratchRegister,
+ MemOperand(rbp, StandardFrameConstants::kContextOffset));
+ // Save context pointer into the stack frame.
+ __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);
+
+ // Create a stack frame.
+ __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
+ __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));
+
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
+
+ /* stack layout
+ 4: old rbp
+ 3: context pointer
+ 2: stub marker
+ 1: return address
+ 0: entry address <-- rsp
+ */
+ __ ret(0);
}
+
return !is_aborted();
}
@@ -768,8 +804,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
@@ -777,6 +813,7 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
@@ -2614,10 +2651,10 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ movp(temp, FieldOperand(temp, Map::kConstructorOffset));
+ __ GetMapConstructor(temp, temp, kScratchRegister);
// Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
+ __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ j(not_equal, is_true);
} else {
@@ -2847,16 +2884,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
- }
-}
-
-
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@@ -2886,37 +2913,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Handle<Cell> cell_handle = instr->hydrogen()->cell().handle();
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- // We have a temp because CompareRoot might clobber kScratchRegister.
- Register cell = ToRegister(instr->temp());
- DCHECK(!value.is(cell));
- __ Move(cell, cell_handle, RelocInfo::CELL);
- __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
- // Store the value.
- __ movp(Operand(cell, 0), value);
- } else {
- // Store the value.
- __ Move(kScratchRegister, cell_handle, RelocInfo::CELL);
- __ movp(Operand(kScratchRegister, 0), value);
- }
- // Cells are always rescanned, so no write barrier here.
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3029,8 +3031,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL,
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3310,7 +3313,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3761,7 +3766,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ subq(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
}
- __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
+ __ roundsd(xmm_scratch, input_reg, kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
@@ -3996,14 +4001,8 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
void LCodeGen::DoMathClz32(LMathClz32* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Label not_zero_input;
- __ bsrl(result, input);
- __ j(not_zero, &not_zero_input);
- __ Set(result, 63); // 63^31 == 32
-
- __ bind(&not_zero_input);
- __ xorl(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+ __ Lzcntl(result, input);
}
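
The deleted fallback is the classic bsr-based clz: for an input with its top set bit at index b, 31 ^ b == 31 - b, and seeding the result with 63 makes the zero case come out as 32. A quick standalone check, using the GCC/Clang builtin in place of Lzcntl:

#include <cassert>
#include <cstdint>

static uint32_t Clz32ViaBsr(uint32_t x) {
  // bsr leaves the destination alone on zero input, hence the explicit 63.
  uint32_t bsr = (x == 0) ? 63 : (31 - __builtin_clz(x));
  return bsr ^ 31;  // for bsr in [0, 31], 31 ^ bsr == 31 - bsr; 63 ^ 31 == 32
}

int main() {
  assert(Clz32ViaBsr(0) == 32);
  assert(Clz32ViaBsr(1) == 31);
  assert(Clz32ViaBsr(0x80000000u) == 0);
  return 0;
}
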
@@ -4270,7 +4269,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
+ Handle<Code> ic =
+ StoreIC::initialize_stub(isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4529,8 +4530,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 2b67ce96cc..9e6a65526b 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -2083,14 +2083,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), rsi);
LOperand* global_object =
@@ -2106,16 +2098,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LOperand* value = UseRegister(instr->value());
- // Use a temp to avoid reloading the cell value address in the case where
- // we perform a hole check.
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
- : new(zone()) LStoreGlobalCell(value, NULL);
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index ec54c7d583..a7a1fe8796 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -100,7 +100,6 @@ class LCodeGen;
V(LoadRoot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@@ -140,7 +139,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1695,13 +1693,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@@ -1723,21 +1714,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
- public:
- explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
- inputs_[0] = value;
- temps_[0] = temp;
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index a1172262b0..65b65a3622 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -14,7 +14,6 @@
#include "src/debug.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
-#include "src/serialize.h"
#include "src/x64/assembler-x64.h"
#include "src/x64/macro-assembler-x64.h"
@@ -177,6 +176,7 @@ void MacroAssembler::LoadRootIndexed(Register destination,
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
DCHECK(root_array_available_);
movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
source);
@@ -920,67 +920,13 @@ Register MacroAssembler::GetSmiConstant(Smi* source) {
xorl(kScratchRegister, kScratchRegister);
return kScratchRegister;
}
- if (value == 1) {
- return kSmiConstantRegister;
- }
LoadSmiConstant(kScratchRegister, source);
return kScratchRegister;
}
void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
- if (emit_debug_code()) {
- Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
- Assembler::RelocInfoNone());
- cmpp(dst, kSmiConstantRegister);
- Assert(equal, kUninitializedKSmiConstantRegister);
- }
- int value = source->value();
- if (value == 0) {
- xorl(dst, dst);
- return;
- }
- bool negative = value < 0;
- unsigned int uvalue = negative ? -value : value;
-
- switch (uvalue) {
- case 9:
- leap(dst,
- Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
- break;
- case 8:
- xorl(dst, dst);
- leap(dst, Operand(dst, kSmiConstantRegister, times_8, 0));
- break;
- case 4:
- xorl(dst, dst);
- leap(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
- break;
- case 5:
- leap(dst,
- Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
- break;
- case 3:
- leap(dst,
- Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
- break;
- case 2:
- leap(dst,
- Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
- break;
- case 1:
- movp(dst, kSmiConstantRegister);
- break;
- case 0:
- UNREACHABLE();
- return;
- default:
- Move(dst, source, Assembler::RelocInfoNone());
- return;
- }
- if (negative) {
- negp(dst);
- }
+ Move(dst, source, Assembler::RelocInfoNone());
}
@@ -1273,14 +1219,6 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
}
-Condition MacroAssembler::CheckIsMinSmi(Register src) {
- DCHECK(!src.is(kScratchRegister));
- // If we overflow by subtracting one, it's the minimal smi value.
- cmpp(src, kSmiConstantRegister);
- return overflow;
-}
-
-
Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
if (SmiValuesAre32Bits()) {
// A 32-bit integer value can always be converted to a smi.
@@ -1419,43 +1357,11 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
return;
} else if (dst.is(src)) {
DCHECK(!dst.is(kScratchRegister));
- switch (constant->value()) {
- case 1:
- addp(dst, kSmiConstantRegister);
- return;
- case 2:
- leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
- return;
- case 4:
- leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
- return;
- case 8:
- leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
- return;
- default:
- Register constant_reg = GetSmiConstant(constant);
- addp(dst, constant_reg);
- return;
- }
+ Register constant_reg = GetSmiConstant(constant);
+ addp(dst, constant_reg);
} else {
- switch (constant->value()) {
- case 1:
- leap(dst, Operand(src, kSmiConstantRegister, times_1, 0));
- return;
- case 2:
- leap(dst, Operand(src, kSmiConstantRegister, times_2, 0));
- return;
- case 4:
- leap(dst, Operand(src, kSmiConstantRegister, times_4, 0));
- return;
- case 8:
- leap(dst, Operand(src, kSmiConstantRegister, times_8, 0));
- return;
- default:
- LoadSmiConstant(dst, constant);
- addp(dst, src);
- return;
- }
+ LoadSmiConstant(dst, constant);
+ addp(dst, src);
}
}
@@ -2578,24 +2484,43 @@ void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
if (src == 0) {
xorps(dst, dst);
} else {
- movl(kScratchRegister, Immediate(src));
- movq(dst, kScratchRegister);
+ unsigned pop = base::bits::CountPopulation32(src);
+ DCHECK_NE(0u, pop);
+ if (pop == 32) {
+ pcmpeqd(dst, dst);
+ } else {
+ movl(kScratchRegister, Immediate(src));
+ movq(dst, kScratchRegister);
+ }
}
}
void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
- uint32_t lower = static_cast<uint32_t>(src);
- uint32_t upper = static_cast<uint32_t>(src >> 32);
- if (upper == 0) {
- Move(dst, lower);
+ if (src == 0) {
+ xorps(dst, dst);
} else {
- if (lower == 0) {
- Move(dst, upper);
- psllq(dst, 32);
+ unsigned nlz = base::bits::CountLeadingZeros64(src);
+ unsigned ntz = base::bits::CountTrailingZeros64(src);
+ unsigned pop = base::bits::CountPopulation64(src);
+ DCHECK_NE(0u, pop);
+ if (pop == 64) {
+ pcmpeqd(dst, dst);
+ } else if (pop + ntz == 64) {
+ pcmpeqd(dst, dst);
+ psllq(dst, ntz);
+ } else if (pop + nlz == 64) {
+ pcmpeqd(dst, dst);
+ psrlq(dst, nlz);
} else {
- movq(kScratchRegister, src);
- movq(dst, kScratchRegister);
+ uint32_t lower = static_cast<uint32_t>(src);
+ uint32_t upper = static_cast<uint32_t>(src >> 32);
+ if (upper == 0) {
+ Move(dst, lower);
+ } else {
+ movq(kScratchRegister, src);
+ movq(dst, kScratchRegister);
+ }
}
}
}
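
The rewritten Move(XMMRegister, uint64_t) skips the general-purpose scratch register whenever the immediate is a single contiguous run of one bits. A sketch of the bit tests, using compiler builtins as stand-ins for the base::bits helpers (assumed semantics: leading/trailing zero count and population count of the immediate):

#include <cassert>
#include <cstdint>

int main() {
  uint64_t src = 0xFFFFFFFF00000000ULL;        // upper half all ones
  unsigned ntz = __builtin_ctzll(src);         // CountTrailingZeros64 -> 32
  unsigned nlz = __builtin_clzll(src);         // CountLeadingZeros64  -> 0
  unsigned pop = __builtin_popcountll(src);    // CountPopulation64    -> 32
  // pop + ntz == 64 means the set bits form one run ending at bit 63, so
  // pcmpeqd (all ones) followed by psllq(dst, ntz) materializes src.
  assert(pop + ntz == 64 && (~0ULL << ntz) == src);
  // Symmetrically, pop + nlz == 64 would use psrlq(dst, nlz) instead.
  (void)nlz;
  return 0;
}
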
@@ -2770,15 +2695,13 @@ void MacroAssembler::Pop(const Operand& dst) {
popq(dst);
} else {
Register scratch = dst.AddressUsesRegister(kScratchRegister)
- ? kSmiConstantRegister : kScratchRegister;
+ ? kRootRegister : kScratchRegister;
movp(scratch, Operand(rsp, 0));
movp(dst, scratch);
leal(rsp, Operand(rsp, 4));
- if (scratch.is(kSmiConstantRegister)) {
- // Restore kSmiConstantRegister.
- movp(kSmiConstantRegister,
- reinterpret_cast<void*>(Smi::FromInt(kSmiConstantRegisterValue)),
- Assembler::RelocInfoNone());
+ if (scratch.is(kRootRegister)) {
+ // Restore kRootRegister.
+ InitializeRootRegister();
}
}
}
@@ -2909,6 +2832,81 @@ void MacroAssembler::Call(Handle<Code> code_object,
}
+void MacroAssembler::Pextrd(Register dst, XMMRegister src, int8_t imm8) {
+ if (imm8 == 0) {
+ movd(dst, src);
+ return;
+ }
+ DCHECK_EQ(1, imm8);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pextrd(dst, src, imm8);
+ return;
+ }
+ movq(dst, src);
+ shrq(dst, Immediate(32));
+}
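
Without SSE4.1, extracting lane 1 falls back to a 64-bit move plus a shift; the scalar equivalent, as a sketch:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t lane = 0x1122334455667788ULL;            // low 64 bits of src
  uint32_t hi = static_cast<uint32_t>(lane >> 32);  // movq(dst, src); shrq(dst, 32)
  assert(hi == 0x11223344u);
  return 0;
}
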
+
+
+void MacroAssembler::Pinsrd(XMMRegister dst, Register src, int8_t imm8) {
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrd(dst, src, imm8);
+ return;
+ }
+ movd(xmm0, src);
+ if (imm8 == 1) {
+ punpckldq(dst, xmm0);
+ } else {
+ DCHECK_EQ(0, imm8);
+ psrlq(dst, 32);
+ punpckldq(xmm0, dst);
+ movaps(dst, xmm0);
+ }
+}
+
+
+void MacroAssembler::Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8) {
+ DCHECK(imm8 == 0 || imm8 == 1);
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ pinsrd(dst, src, imm8);
+ return;
+ }
+ movd(xmm0, src);
+ if (imm8 == 1) {
+ punpckldq(dst, xmm0);
+ } else {
+ DCHECK_EQ(0, imm8);
+ psrlq(dst, 32);
+ punpckldq(xmm0, dst);
+ movaps(dst, xmm0);
+ }
+}
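
Both Pinsrd fallbacks lean on punpckldq, which keeps the destination's low dword and pulls the source's low dword into bits 63..32. A scalar model of the two imm8 cases on the low 64-bit lane (the Punpckldq helper is illustrative):

#include <cassert>
#include <cstdint>

// punpckldq dst, src keeps dst[31:0] and sets dst[63:32] = src[31:0].
static uint64_t Punpckldq(uint64_t dst, uint64_t src) {
  return (src << 32) | static_cast<uint32_t>(dst);
}

int main() {
  uint64_t dst = 0xAAAAAAAABBBBBBBBULL;
  uint64_t src = 0x0000000011111111ULL;  // movd(xmm0, src)
  // imm8 == 1: new value becomes the high dword, old low dword survives.
  assert(Punpckldq(dst, src) == 0x11111111BBBBBBBBULL);
  // imm8 == 0: psrlq(dst, 32) parks the old high dword low, then the
  // interleave puts src low and the old high dword back on top.
  assert(Punpckldq(src, dst >> 32) == 0xAAAAAAAA11111111ULL);
  return 0;
}
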
+
+
+void MacroAssembler::Lzcntl(Register dst, Register src) {
+ // TODO(intel): Add support for LZCNT (BMI1/ABM).
+ Label not_zero_src;
+ bsrl(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 63); // 63^31 == 32
+ bind(&not_zero_src);
+ xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
+}
+
+
+void MacroAssembler::Lzcntl(Register dst, const Operand& src) {
+ // TODO(intel): Add support for LZCNT (BMI1/ABM).
+ Label not_zero_src;
+ bsrl(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Set(dst, 63); // 63^31 == 32
+ bind(&not_zero_src);
+ xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
+}
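
Until the LZCNT TODO above is resolved, the count is derived from BSR: for a highest-set-bit index i in [0, 31], 31 ^ i equals 31 - i, and the zero-input case pre-loads 63 so the same XOR yields 32. A sketch with __builtin_clz standing in for bsrl:

#include <cassert>
#include <cstdint>

static unsigned EmulatedLzcnt32(uint32_t x) {
  if (x == 0) return 63 ^ 31;           // Set(dst, 63): 63 ^ 31 == 32
  unsigned i = 31 - __builtin_clz(x);   // bsrl: index of highest set bit
  return i ^ 31;                        // 31 ^ i == 31 - i for i in [0, 31]
}

int main() {
  assert(EmulatedLzcnt32(0) == 32);
  assert(EmulatedLzcnt32(1) == 31);
  assert(EmulatedLzcnt32(0x80000000u) == 0);
  return 0;
}
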
+
+
void MacroAssembler::Pushad() {
Push(rax);
Push(rcx);
@@ -2921,11 +2919,11 @@ void MacroAssembler::Pushad() {
Push(r9);
// r10 is kScratchRegister.
Push(r11);
- // r12 is kSmiConstantRegister.
+ Push(r12);
// r13 is kRootRegister.
Push(r14);
Push(r15);
- STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
+ STATIC_ASSERT(12 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
@@ -2940,6 +2938,7 @@ void MacroAssembler::Popad() {
leap(rsp, Operand(rsp, sp_delta));
Pop(r15);
Pop(r14);
+ Pop(r12);
Pop(r11);
Pop(r9);
Pop(r8);
@@ -2973,10 +2972,10 @@ MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
7,
-1,
8,
- -1,
- -1,
9,
- 10
+ -1,
+ 10,
+ 11
};
@@ -3001,46 +3000,21 @@ Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
}
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
+void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
- kFPOnStackSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // We will build up the handler from the bottom by pushing on the stack.
- // First push the frame pointer and context.
- if (kind == StackHandler::JS_ENTRY) {
- // The frame pointer does not point to a JS frame so we save NULL for
- // rbp. We expect the code throwing an exception to check rbp before
- // dereferencing it to restore the context.
- pushq(Immediate(0)); // NULL frame pointer.
- Push(Smi::FromInt(0)); // No context.
- } else {
- pushq(rbp);
- Push(rsi);
- }
-
- // Push the state and the code object.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- Push(Immediate(state));
- Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
Push(ExternalOperand(handler_address));
+
// Set this new handler as the current one.
movp(ExternalOperand(handler_address), rsp);
}
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
Pop(ExternalOperand(handler_address));
@@ -3048,106 +3022,6 @@ void MacroAssembler::PopTryHandler() {
}
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // rax = exception, rdi = code object, rdx = state.
- movp(rbx, FieldOperand(rdi, Code::kHandlerTableOffset));
- shrp(rdx, Immediate(StackHandler::kKindWidth));
- movp(rdx,
- FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize));
- SmiToInteger64(rdx, rdx);
- leap(rdi, FieldOperand(rdi, rdx, times_1, Code::kHeaderSize));
- jmp(rdi);
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
- kFPOnStackSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in rax.
- if (!value.is(rax)) {
- movp(rax, value);
- }
- // Drop the stack pointer to the top of the top handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- movp(rsp, ExternalOperand(handler_address));
- // Restore the next handler.
- Pop(ExternalOperand(handler_address));
-
- // Remove the code object and state, compute the handler address in rdi.
- Pop(rdi); // Code object.
- Pop(rdx); // Offset and state.
-
- // Restore the context and frame pointer.
- Pop(rsi); // Context.
- popq(rbp); // Frame pointer.
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (rbp == 0) == (rsi == 0), so we could test either
- // rbp or rsi.
- Label skip;
- testp(rsi, rsi);
- j(zero, &skip, Label::kNear);
- movp(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
- bind(&skip);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize +
- kFPOnStackSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in rax.
- if (!value.is(rax)) {
- movp(rax, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- Load(rsp, handler_address);
-
- // Unwind the handlers until the top ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind, Label::kNear);
- bind(&fetch_next);
- movp(rsp, Operand(rsp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- testl(Operand(rsp, StackHandlerConstants::kStateOffset),
- Immediate(StackHandler::KindField::kMask));
- j(not_zero, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- Pop(ExternalOperand(handler_address));
-
- // Remove the code object and state, compute the handler address in rdi.
- Pop(rdi); // Code object.
- Pop(rdx); // Offset and state.
-
- // Clear the context pointer and frame pointer (0 was saved in the handler).
- Pop(rsi);
- popq(rbp);
-
- JumpToHandlerEntry();
-}
-
-
void MacroAssembler::Ret() {
ret(0);
}
@@ -3565,6 +3439,20 @@ Condition MacroAssembler::IsObjectNameType(Register heap_object,
}
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp) {
+ Label done, loop;
+ movp(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
+ bind(&loop);
+ JumpIfSmi(result, &done);
+ CmpObjectType(result, MAP_TYPE, temp);
+ j(not_equal, &done);
+ movp(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
+ jmp(&loop);
+ bind(&done);
+}
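
Since Map::kConstructorOffset became kConstructorOrBackPointerOffset, the field may hold a back pointer to another map rather than the constructor, and the emitted loop chases back pointers until it reaches a non-map. Roughly, in illustrative types rather than the real V8 object model:

struct HeapValue {
  bool is_map;                             // CmpObjectType(result, MAP_TYPE, temp)
  HeapValue* constructor_or_back_pointer;  // Map::kConstructorOrBackPointerOffset
};

// Smis and non-map heap objects terminate the walk, mirroring the
// JumpIfSmi and MAP_TYPE checks in the emitted code.
HeapValue* GetMapConstructor(HeapValue* map) {
  HeapValue* result = map->constructor_or_back_pointer;
  while (result && result->is_map) {
    result = result->constructor_or_back_pointer;
  }
  return result;
}
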
+
+
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Label* miss,
@@ -3618,7 +3506,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- movp(result, FieldOperand(result, Map::kConstructorOffset));
+ GetMapConstructor(result, result, kScratchRegister);
}
// All done.
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 9f25d60ddf..a851a46a75 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -17,10 +17,7 @@ namespace internal {
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
const Register kScratchRegister = { 10 }; // r10.
-const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
const Register kRootRegister = { 13 }; // r13 (callee save).
-// Value of smi in kSmiConstantRegister.
-const int kSmiConstantRegisterValue = 1;
// Actual value of root register is offset from the root array's start
// to take advantage of negative 8-bit displacement values.
const int kRootRegisterBias = 128;
@@ -390,11 +387,6 @@ class MacroAssembler: public Assembler {
void SafeMove(Register dst, Smi* src);
void SafePush(Smi* src);
- void InitializeSmiConstantRegister() {
- Move(kSmiConstantRegister, Smi::FromInt(kSmiConstantRegisterValue),
- Assembler::RelocInfoNone());
- }
-
// Conversions between tagged smi values and non-tagged integer values.
// Tag an integer value. The result must be known to be a valid smi value.
@@ -474,11 +466,6 @@ class MacroAssembler: public Assembler {
Register second,
Register scratch = kScratchRegister);
- // Is the value the minimum smi value (since we are using
- // two's complement numbers, negating the value is known to yield
- // a non-smi value).
- Condition CheckIsMinSmi(Register src);
-
// Checks whether an 32-bit integer value is a valid for conversion
// to a smi.
Condition CheckInteger32ValidSmiValue(Register src);
@@ -935,10 +922,18 @@ class MacroAssembler: public Assembler {
Call(self, RelocInfo::CODE_TARGET);
}
+ // Non-SSE2 instructions.
+ void Pextrd(Register dst, XMMRegister src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, Register src, int8_t imm8);
+ void Pinsrd(XMMRegister dst, const Operand& src, int8_t imm8);
+
+ void Lzcntl(Register dst, Register src);
+ void Lzcntl(Register dst, const Operand& src);
+
// Non-x64 instructions.
// Push/pop all general purpose registers.
// Does not push rsp/rbp nor any of the assembler's special purpose registers
- // (kScratchRegister, kSmiConstantRegister, kRootRegister).
+ // (kScratchRegister, kRootRegister).
void Pushad();
void Popad();
// Sets the stack as after performing Popad, without actually loading the
@@ -1114,18 +1109,11 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link it into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link it into stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Activate the top handler in the try hander chain and pass the
- // thrown value.
- void Throw(Register value);
-
- // Propagate an uncatchable exception out of the current JS stack.
- void ThrowUncatchable(Register value);
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -1246,6 +1234,10 @@ class MacroAssembler: public Assembler {
void NegativeZeroTest(Register result, Register op1, Register op2,
Register scratch, Label* then_label);
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done.
+ void GetMapConstructor(Register result, Register map, Register temp);
+
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
@@ -1464,9 +1456,9 @@ class MacroAssembler: public Assembler {
private:
// Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
+ // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14, r15.
static const int kSafepointPushRegisterIndices[Register::kNumRegisters];
- static const int kNumSafepointSavedRegisters = 11;
+ static const int kNumSafepointSavedRegisters = 12;
static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool generating_stub_;
@@ -1538,10 +1530,6 @@ class MacroAssembler: public Assembler {
Register bitmap_reg,
Register mask_reg);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code) {
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 1818dbb72c..da55112933 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -11,7 +11,6 @@
#include "src/macro-assembler.h"
#include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h"
-#include "src/serialize.h"
#include "src/unicode.h"
#include "src/x64/regexp-macro-assembler-x64.h"
@@ -43,10 +42,9 @@ namespace internal {
*
* The registers rax, rbx, r9 and r11 are free to use for computations.
* If changed to use r12+, they should be saved as callee-save registers.
- * The macro assembler special registers r12 and r13 (kSmiConstantRegister,
- * kRootRegister) aren't special during execution of RegExp code (they don't
- * hold the values assumed when creating JS code), so no Smi or Root related
- * macro operations can be used.
+ * The macro assembler special register r13 (kRootRegister) isn't special
+ * during execution of RegExp code (it doesn't hold the value assumed when
+ * creating JS code), so no Root related macro operations can be used.
*
* Each call to a C++ method should retain these registers.
*
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index 6555ccdd83..800ead2803 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -155,12 +155,24 @@ void RelocInfo::set_target_object(Object* target,
}
-Address RelocInfo::target_reference() {
+Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
return Memory::Address_at(pc_);
}
+Address RelocInfo::target_internal_reference() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return Memory::Address_at(pc_);
+}
+
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return reinterpret_cast<Address>(pc_);
+}
+
+
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
@@ -269,7 +281,8 @@ Object** RelocInfo::call_object_address() {
void RelocInfo::WipeOut() {
- if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_)) {
+ if (IsEmbeddedObject(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
} else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
// Effectively write zero into the relocation.
@@ -301,7 +314,8 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
visitor->VisitCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ visitor->VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
visitor->VisitCodeAgeSequence(this);
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -328,7 +342,8 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- CpuFeatures::FlushICache(pc_, sizeof(Address));
+ } else if (mode == RelocInfo::INTERNAL_REFERENCE) {
+ StaticVisitor::VisitInternalReference(this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
StaticVisitor::VisitCodeAgeSequence(heap, this);
} else if (heap->isolate()->debug()->has_break_points() &&
@@ -517,6 +532,12 @@ void Assembler::emit_near_disp(Label* L) {
}
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ Memory::Address_at(pc) = target;
+}
+
+
void Operand::set_modrm(int mod, Register rm) {
DCHECK((mod & -4) == 0);
buf_[0] = mod << 6 | rm.code();
diff --git a/deps/v8/src/x87/assembler-x87.cc b/deps/v8/src/x87/assembler-x87.cc
index f2db021b69..43694ded6b 100644
--- a/deps/v8/src/x87/assembler-x87.cc
+++ b/deps/v8/src/x87/assembler-x87.cc
@@ -42,7 +42,6 @@
#include "src/base/cpu.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -102,17 +101,6 @@ bool RelocInfo::IsInConstantPool() {
}
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- for (int i = 0; i < instruction_count; i++) {
- *(pc_ + i) = *(instructions + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count);
-}
-
-
// Patch the code at the current PC with a call to the target address.
// Additional guard int3 instructions can be added if required.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
@@ -123,7 +111,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
// Create a code patcher.
CodePatcher patcher(pc_, code_size);
- // Add a label for checking the size of the code used for returning.
+// Add a label for checking the size of the code used for returning.
#ifdef DEBUG
Label check_codesize;
patcher.masm()->bind(&check_codesize);
diff --git a/deps/v8/src/x87/assembler-x87.h b/deps/v8/src/x87/assembler-x87.h
index 05359648ad..67af72e0c2 100644
--- a/deps/v8/src/x87/assembler-x87.h
+++ b/deps/v8/src/x87/assembler-x87.h
@@ -40,8 +40,8 @@
#include <deque>
#include "src/assembler.h"
+#include "src/compiler.h"
#include "src/isolate.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -529,6 +529,11 @@ class Assembler : public AssemblerBase {
set_target_address_at(instruction_payload, code, target);
}
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
static const int kSpecialTargetSize = kPointerSize;
// Distance between the address of the code target in the call instruction
@@ -941,7 +946,7 @@ class Assembler : public AssemblerBase {
// Record a deoptimization reason that can be used by a log or cpu profiler.
// Use --trace-deopt to enable.
- void RecordDeoptReason(const int reason, const int raw_position);
+ void RecordDeoptReason(const int reason, const SourcePosition position);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
diff --git a/deps/v8/src/x87/builtins-x87.cc b/deps/v8/src/x87/builtins-x87.cc
index 9fda5a7188..50c3c67ae0 100644
--- a/deps/v8/src/x87/builtins-x87.cc
+++ b/deps/v8/src/x87/builtins-x87.cc
@@ -990,42 +990,116 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
}
-void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
- static const int kArgumentsOffset = 2 * kPointerSize;
- static const int kReceiverOffset = 3 * kPointerSize;
- static const int kFunctionOffset = 4 * kPointerSize;
+static void Generate_CheckStackOverflow(MacroAssembler* masm,
+ const int calleeOffset) {
+ // eax : the number of items to be pushed to the stack
+ //
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ ExternalReference real_stack_limit =
+ ExternalReference::address_of_real_stack_limit(masm->isolate());
+ __ mov(edi, Operand::StaticVariable(real_stack_limit));
+ // Make ecx the space we have left. The stack might already be overflowed
+ // here which will cause ecx to become negative.
+ __ mov(ecx, esp);
+ __ sub(ecx, edi);
+ // Make edx the space we need for the array when it is unrolled onto the
+ // stack.
+ __ mov(edx, eax);
+ __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
+ // Check if the arguments will overflow the stack.
+ __ cmp(ecx, edx);
+ __ j(greater, &okay); // Signed comparison.
+
+ // Out of stack space.
+ __ push(Operand(ebp, calleeOffset)); // push this
+ __ push(eax);
+ __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+
+ __ bind(&okay);
+}
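
The hoisted check receives a smi-tagged count in eax; shifting left by kPointerSizeLog2 - kSmiTagSize converts it straight to bytes on ia32. A quick sanity check of that arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiTagSize = 1;       // ia32: smi == value << 1
  const int kPointerSizeLog2 = 2;  // ia32: kPointerSize == 4
  int32_t count = 7;
  int32_t tagged = count << kSmiTagSize;                       // eax at entry
  int32_t bytes = tagged << (kPointerSizeLog2 - kSmiTagSize);  // shl edx
  assert(bytes == count * 4);
  // The stub then computes ecx = esp - real_stack_limit and requires
  // ecx > bytes as a signed comparison, so an already-blown stack
  // (negative ecx) fails the check as well.
  return 0;
}
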
+
+
+static void Generate_PushAppliedArguments(MacroAssembler* masm,
+ const int argumentsOffset,
+ const int indexOffset,
+ const int limitOffset) {
+ // Copy all arguments from the array to the stack.
+ Label entry, loop;
+ Register receiver = LoadDescriptor::ReceiverRegister();
+ Register key = LoadDescriptor::NameRegister();
+ __ mov(key, Operand(ebp, indexOffset));
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(receiver, Operand(ebp, argumentsOffset)); // load arguments
+
+ if (FLAG_vector_ics) {
+ // TODO(mvstanton): Vector-based ics need additional infrastructure to
+ // be embedded here. For now, just call the runtime.
+ __ push(receiver);
+ __ push(key);
+ __ CallRuntime(Runtime::kGetProperty, 2);
+ } else {
+ // Use inline caching to speed up access to arguments.
+ Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // It is important that we do not have a test instruction after the
+ // call. A test instruction after the call is used to indicate that
+ // we have generated an inline version of the keyed load. In this
+ // case, we know that we are not generating a test instruction next.
+ }
+
+ // Push the nth argument.
+ __ push(eax);
+
+ // Update the index on the stack and in register key.
+ __ mov(key, Operand(ebp, indexOffset));
+ __ add(key, Immediate(1 << kSmiTagSize));
+ __ mov(Operand(ebp, indexOffset), key);
+
+ __ bind(&entry);
+ __ cmp(key, Operand(ebp, limitOffset));
+ __ j(not_equal, &loop);
+
+ // On exit, the pushed arguments count is in eax, untagged
+ __ Move(eax, key);
+ __ SmiUntag(eax);
+}
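
The factored-out loop keeps its index smi-tagged in a frame slot and untags the count only on exit. A plain C++ restatement of the control flow, with the IC/runtime property load abstracted to an array read (names are hypothetical):

#include <cstdint>

int PushAppliedArguments(const int64_t* arguments, int64_t* stack, int limit) {
  const int kSmiTagSize = 1;
  int key = 0;                            // smi-tagged index slot
  const int tagged_limit = limit << kSmiTagSize;
  while (key != tagged_limit) {           // cmp(key, limitOffset); j(not_equal)
    int i = key >> kSmiTagSize;
    stack[i] = arguments[i];              // KeyedLoadIC / kGetProperty, push(eax)
    key += 1 << kSmiTagSize;              // add(key, Immediate(1 << kSmiTagSize))
  }
  return key >> kSmiTagSize;              // Move(eax, key); SmiUntag(eax)
}
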
+
+
+// Used by FunctionApply and ReflectApply
+static void Generate_ApplyHelper(MacroAssembler* masm, bool targetIsArgument) {
+ const int kFormalParameters = targetIsArgument ? 3 : 2;
+ const int kStackSize = kFormalParameters + 1;
+
+ // Stack at entry:
+ // esp : return address
+ // esp[4] : arguments
+ // esp[8] : receiver ("this")
+ // esp[12] : function
{
FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Stack frame:
+ // ebp : Old base pointer
+ // ebp[4] : return address
+ // ebp[8] : function arguments
+ // ebp[12] : receiver
+ // ebp[16] : function
+ static const int kArgumentsOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kReceiverOffset = kArgumentsOffset + kPointerSize;
+ static const int kFunctionOffset = kReceiverOffset + kPointerSize;
__ push(Operand(ebp, kFunctionOffset)); // push this
__ push(Operand(ebp, kArgumentsOffset)); // push arguments
- __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
-
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
- Label okay;
- ExternalReference real_stack_limit =
- ExternalReference::address_of_real_stack_limit(masm->isolate());
- __ mov(edi, Operand::StaticVariable(real_stack_limit));
- // Make ecx the space we have left. The stack might already be overflowed
- // here which will cause ecx to become negative.
- __ mov(ecx, esp);
- __ sub(ecx, edi);
- // Make edx the space we need for the array when it is unrolled onto the
- // stack.
- __ mov(edx, eax);
- __ shl(edx, kPointerSizeLog2 - kSmiTagSize);
- // Check if the arguments will overflow the stack.
- __ cmp(ecx, edx);
- __ j(greater, &okay); // Signed comparison.
-
- // Out of stack space.
- __ push(Operand(ebp, 4 * kPointerSize)); // push this
- __ push(eax);
- __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
- __ bind(&okay);
- // End of stack check.
+ if (targetIsArgument) {
+ __ InvokeBuiltin(Builtins::REFLECT_APPLY_PREPARE, CALL_FUNCTION);
+ } else {
+ __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+ }
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
// Push current index and limit.
const int kLimitOffset =
@@ -1088,55 +1162,20 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ bind(&push_receiver);
__ push(ebx);
- // Copy all arguments from the array to the stack.
- Label entry, loop;
- Register receiver = LoadDescriptor::ReceiverRegister();
- Register key = LoadDescriptor::NameRegister();
- __ mov(key, Operand(ebp, kIndexOffset));
- __ jmp(&entry);
- __ bind(&loop);
- __ mov(receiver, Operand(ebp, kArgumentsOffset)); // load arguments
-
- if (FLAG_vector_ics) {
- // TODO(mvstanton): Vector-based ics need additional infrastructure to
- // be embedded here. For now, just call the runtime.
- __ push(receiver);
- __ push(key);
- __ CallRuntime(Runtime::kGetProperty, 2);
- } else {
- // Use inline caching to speed up access to arguments.
- Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
- __ call(ic, RelocInfo::CODE_TARGET);
- // It is important that we do not have a test instruction after the
- // call. A test instruction after the call is used to indicate that
- // we have generated an inline version of the keyed load. In this
- // case, we know that we are not generating a test instruction next.
- }
-
- // Push the nth argument.
- __ push(eax);
-
- // Update the index on the stack and in register key.
- __ mov(key, Operand(ebp, kIndexOffset));
- __ add(key, Immediate(1 << kSmiTagSize));
- __ mov(Operand(ebp, kIndexOffset), key);
-
- __ bind(&entry);
- __ cmp(key, Operand(ebp, kLimitOffset));
- __ j(not_equal, &loop);
+ // Loop over the arguments array, pushing each value to the stack
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
// Call the function.
Label call_proxy;
ParameterCount actual(eax);
- __ Move(eax, key);
- __ SmiUntag(eax);
__ mov(edi, Operand(ebp, kFunctionOffset));
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &call_proxy);
__ InvokeFunction(edi, actual, CALL_FUNCTION, NullCallWrapper());
frame_scope.GenerateLeaveFrame();
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
// Call the function proxy.
__ bind(&call_proxy);
@@ -1149,7 +1188,92 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Leave internal frame.
}
- __ ret(3 * kPointerSize); // remove this, receiver, and arguments
+ __ ret(kStackSize * kPointerSize); // remove this, receiver, and arguments
+}
+
+
+// Used by ReflectConstruct
+static void Generate_ConstructHelper(MacroAssembler* masm) {
+ const int kFormalParameters = 3;
+ const int kStackSize = kFormalParameters + 1;
+
+ // Stack at entry:
+ // esp : return address
+ // esp[4] : original constructor (new.target)
+ // esp[8] : arguments
+  // esp[12] : constructor
+ {
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Stack frame:
+ // ebp : Old base pointer
+ // ebp[4] : return address
+ // ebp[8] : original constructor (new.target)
+ // ebp[12] : arguments
+ // ebp[16] : constructor
+ static const int kNewTargetOffset = kFPOnStackSize + kPCOnStackSize;
+ static const int kArgumentsOffset = kNewTargetOffset + kPointerSize;
+ static const int kFunctionOffset = kArgumentsOffset + kPointerSize;
+
+ // If newTarget is not supplied, set it to constructor
+ Label validate_arguments;
+ __ mov(eax, Operand(ebp, kNewTargetOffset));
+ __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &validate_arguments, Label::kNear);
+ __ mov(eax, Operand(ebp, kFunctionOffset));
+ __ mov(Operand(ebp, kNewTargetOffset), eax);
+
+ // Validate arguments
+ __ bind(&validate_arguments);
+ __ push(Operand(ebp, kFunctionOffset));
+ __ push(Operand(ebp, kArgumentsOffset));
+ __ push(Operand(ebp, kNewTargetOffset));
+ __ InvokeBuiltin(Builtins::REFLECT_CONSTRUCT_PREPARE, CALL_FUNCTION);
+
+ Generate_CheckStackOverflow(masm, kFunctionOffset);
+
+ // Push current index and limit.
+ const int kLimitOffset =
+ StandardFrameConstants::kExpressionsOffset - 1 * kPointerSize;
+ const int kIndexOffset = kLimitOffset - 1 * kPointerSize;
+ __ Push(eax); // limit
+ __ push(Immediate(0)); // index
+ // Push newTarget and callee functions
+ __ push(Operand(ebp, kNewTargetOffset));
+ __ push(Operand(ebp, kFunctionOffset));
+
+ // Loop over the arguments array, pushing each value to the stack
+ Generate_PushAppliedArguments(
+ masm, kArgumentsOffset, kIndexOffset, kLimitOffset);
+
+ // Use undefined feedback vector
+ __ LoadRoot(ebx, Heap::kUndefinedValueRootIndex);
+ __ mov(edi, Operand(ebp, kFunctionOffset));
+
+ // Call the function.
+ CallConstructStub stub(masm->isolate(), SUPER_CONSTRUCTOR_CALL);
+ __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+
+ __ Drop(1);
+
+ // Leave internal frame.
+ }
+ // remove this, target, arguments, and newTarget
+ __ ret(kStackSize * kPointerSize);
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, false);
+}
+
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ Generate_ApplyHelper(masm, true);
+}
+
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ Generate_ConstructHelper(masm);
}
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 58200bca82..841e0f11a8 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -12,6 +12,7 @@
#include "src/codegen.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
#include "src/isolate.h"
#include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h"
@@ -410,7 +411,7 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
__ ret(0);
StubRuntimeCallHelper call_helper;
- char_at_generator.GenerateSlow(masm, call_helper);
+ char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
@@ -759,8 +760,15 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
__ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
if (has_new_target()) {
+ // If the constructor was [[Call]]ed, the call will not push a new.target
+ // onto the stack. In that case the arguments array we construct is bogus,
+  // but we do not care as the constructor throws immediately.
+ __ cmp(ecx, Immediate(Smi::FromInt(0)));
+ Label skip_decrement;
+ __ j(equal, &skip_decrement);
// Subtract 1 from smi-tagged arguments count.
__ sub(ecx, Immediate(2));
+ __ bind(&skip_decrement);
}
__ lea(edx, Operand(edx, ecx, times_2,
@@ -870,7 +878,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// time or if regexp entry in generated code is turned off runtime switch or
// at compilation.
#ifdef V8_INTERPRETED_REGEXP
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else // V8_INTERPRETED_REGEXP
// Stack frame on entry.
@@ -1146,22 +1154,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand::StaticVariable(pending_exception));
__ cmp(edx, eax);
__ j(equal, &runtime);
- // For exception, throw the exception again.
-
- // Clear the pending exception variable.
- __ mov(Operand::StaticVariable(pending_exception), edx);
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ cmp(eax, factory->termination_exception());
- Label throw_termination_exception;
- __ j(equal, &throw_termination_exception, Label::kNear);
-
- // Handle normal exception by following handler chain.
- __ Throw(eax);
-
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(eax);
+ // For exception, throw the exception again.
+ __ TailCallRuntime(Runtime::kRegExpExecReThrow, 4, 1);
__ bind(&failure);
// For failure to match, return null.
@@ -1247,7 +1242,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Do the runtime call to execute the regexp.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+ __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
// Deferred code for string handling.
// (7) Not a long external string? If yes, go to (10).
@@ -2208,15 +2203,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ cmp(eax, isolate()->factory()->exception());
__ j(equal, &exception_returned);
- ExternalReference pending_exception_address(
- Isolate::kPendingExceptionAddress, isolate());
-
// Check that there is no pending exception, otherwise we
// should have returned the exception sentinel.
if (FLAG_debug_code) {
__ push(edx);
__ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
Label okay;
+ ExternalReference pending_exception_address(
+ Isolate::kPendingExceptionAddress, isolate());
__ cmp(edx, Operand::StaticVariable(pending_exception_address));
// Cannot use check here as it attempts to generate call into runtime.
__ j(equal, &okay, Label::kNear);
@@ -2232,24 +2226,48 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Handling of exception.
__ bind(&exception_returned);
- // Retrieve the pending exception.
- __ mov(eax, Operand::StaticVariable(pending_exception_address));
-
- // Clear the pending exception.
- __ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
- __ mov(Operand::StaticVariable(pending_exception_address), edx);
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- Label throw_termination_exception;
- __ cmp(eax, isolate()->factory()->termination_exception());
- __ j(equal, &throw_termination_exception);
-
- // Handle normal exception.
- __ Throw(eax);
+ ExternalReference pending_handler_context_address(
+ Isolate::kPendingHandlerContextAddress, isolate());
+ ExternalReference pending_handler_code_address(
+ Isolate::kPendingHandlerCodeAddress, isolate());
+ ExternalReference pending_handler_offset_address(
+ Isolate::kPendingHandlerOffsetAddress, isolate());
+ ExternalReference pending_handler_fp_address(
+ Isolate::kPendingHandlerFPAddress, isolate());
+ ExternalReference pending_handler_sp_address(
+ Isolate::kPendingHandlerSPAddress, isolate());
+
+ // Ask the runtime for help to determine the handler. This will set eax to
+ // contain the current pending exception, don't clobber it.
+ ExternalReference find_handler(Runtime::kFindExceptionHandler, isolate());
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, eax);
+ __ mov(Operand(esp, 0 * kPointerSize), Immediate(0)); // argc.
+ __ mov(Operand(esp, 1 * kPointerSize), Immediate(0)); // argv.
+ __ mov(Operand(esp, 2 * kPointerSize),
+ Immediate(ExternalReference::isolate_address(isolate())));
+ __ CallCFunction(find_handler, 3);
+ }
- __ bind(&throw_termination_exception);
- __ ThrowUncatchable(eax);
+ // Retrieve the handler context, SP and FP.
+ __ mov(esi, Operand::StaticVariable(pending_handler_context_address));
+ __ mov(esp, Operand::StaticVariable(pending_handler_sp_address));
+ __ mov(ebp, Operand::StaticVariable(pending_handler_fp_address));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+  // the context (esi) will be zero for non-JS frames.
+ Label skip;
+ __ test(esi, esi);
+ __ j(zero, &skip, Label::kNear);
+ __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ __ bind(&skip);
+
+ // Compute the handler entry address and jump to it.
+ __ mov(edi, Operand::StaticVariable(pending_handler_code_address));
+ __ mov(edx, Operand::StaticVariable(pending_handler_offset_address));
+ __ lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
+ __ jmp(edi);
}
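
Throw and ThrowUncatchable disappear because unwinding moved into the runtime: the stub calls Runtime::kFindExceptionHandler, which fills the per-isolate pending-handler slots referenced above, and then simply reloads them. A rough mirror of what the stub consumes (field names are illustrative; the real slots are the Isolate::kPendingHandler*Address constants):

#include <cstdint>

struct PendingHandler {
  uintptr_t context;  // reloaded into esi; zero for non-JS handler frames
  uint8_t* code;      // handler's code object
  intptr_t offset;    // handler entry offset inside that code object
  uintptr_t fp;       // reloaded into ebp
  uintptr_t sp;       // reloaded into esp
};

// The final jump is effectively code + Code::kHeaderSize + offset, which is
// what lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize)) computes.
inline uint8_t* HandlerEntry(const PendingHandler& h, int code_header_size) {
  return h.code + code_header_size + h.offset;
}
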
@@ -2299,10 +2317,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ mov(eax, Immediate(isolate()->factory()->exception()));
__ jmp(&exit);
- // Invoke: Link this frame into the handler chain. There's only one
- // handler block in this code object, so its index is 0.
+ // Invoke: Link this frame into the handler chain.
__ bind(&invoke);
- __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+ __ PushStackHandler();
// Clear any pending exceptions.
__ mov(edx, Immediate(isolate()->factory()->the_hole_value()));
@@ -2328,7 +2345,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ call(edx);
// Unlink this frame from the handler chain.
- __ PopTryHandler();
+ __ PopStackHandler();
__ bind(&exit);
// Check if the current stack frame is marked as the outermost JS frame.
@@ -2614,7 +2631,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
void StringCharCodeAtGenerator::GenerateSlow(
- MacroAssembler* masm,
+ MacroAssembler* masm, EmbedMode embed_mode,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
@@ -2626,6 +2643,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
index_not_number_,
DONT_DO_SMI_CHECK);
call_helper.BeforeCall(masm);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ push(VectorLoadICDescriptor::VectorRegister());
+ __ push(VectorLoadICDescriptor::SlotRegister());
+ }
__ push(object_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
@@ -2641,6 +2662,10 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ mov(index_, eax);
}
__ pop(object_);
+ if (FLAG_vector_ics && embed_mode == PART_OF_IC_HANDLER) {
+ __ pop(VectorLoadICDescriptor::SlotRegister());
+ __ pop(VectorLoadICDescriptor::VectorRegister());
+ }
// Reload the instance type.
__ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
__ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
@@ -2956,7 +2981,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to create the sub string.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kSubString, 3, 1);
+ __ TailCallRuntime(Runtime::kSubStringRT, 3, 1);
__ bind(&single_char);
// eax: string
@@ -3179,7 +3204,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
// Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
@@ -3240,7 +3265,7 @@ void CompareICStub::GenerateSmis(MacroAssembler* masm) {
void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
DCHECK(state() == CompareICState::NUMBER);
- Label generic_stub;
+ Label generic_stub, check_left;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
@@ -3253,13 +3278,13 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or SSE2 or CMOV is unsupported.
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
-
+ __ JumpIfSmi(eax, &check_left, Label::kNear);
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined1, Label::kNear);
+
+ __ bind(&check_left);
+ __ JumpIfSmi(edx, &generic_stub, Label::kNear);
__ cmp(FieldOperand(edx, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
__ j(not_equal, &maybe_undefined2, Label::kNear);
@@ -3463,7 +3488,7 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
if (equality) {
__ TailCallRuntime(Runtime::kStringEquals, 2, 1);
} else {
- __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+ __ TailCallRuntime(Runtime::kStringCompareRT, 2, 1);
}
__ bind(&miss);
@@ -4041,15 +4066,234 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorLoadStub stub(isolate(), state());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawLoadStub stub(isolate(), state());
+ stub.GenerateForTrampoline(masm);
}
void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
- VectorKeyedLoadStub stub(isolate());
- __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+ VectorRawKeyedLoadStub stub(isolate());
+ stub.GenerateForTrampoline(masm);
+}
+
+
+static void HandleArrayCases(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register feedback, bool is_polymorphic,
+ Label* miss) {
+ // feedback initially contains the feedback array
+ Label next, next_loop, prepare_next;
+ Label load_smi_map, compare_map;
+ Label start_polymorphic;
+
+ __ push(receiver);
+ __ push(vector);
+
+ Register receiver_map = receiver;
+ Register cached_map = vector;
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &load_smi_map);
+ __ mov(receiver_map, FieldOperand(receiver, 0));
+ __ bind(&compare_map);
+ __ mov(cached_map, FieldOperand(feedback, FixedArray::OffsetOfElementAt(0)));
+
+  // A named keyed load might have a 2-element array; all other cases can count
+ // on an array with at least 2 {map, handler} pairs, so they can go right
+ // into polymorphic array handling.
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, is_polymorphic ? &start_polymorphic : &next);
+
+ // found, now call handler.
+ Register handler = feedback;
+ __ mov(handler, FieldOperand(feedback, FixedArray::OffsetOfElementAt(1)));
+ __ pop(vector);
+ __ pop(receiver);
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ if (!is_polymorphic) {
+ __ bind(&next);
+ __ cmp(FieldOperand(feedback, FixedArray::kLengthOffset),
+ Immediate(Smi::FromInt(2)));
+ __ j(not_equal, &start_polymorphic);
+ __ pop(vector);
+ __ pop(receiver);
+ __ jmp(miss);
+ }
+
+ // Polymorphic, we have to loop from 2 to N
+ __ bind(&start_polymorphic);
+ __ push(key);
+ Register counter = key;
+ __ mov(counter, Immediate(Smi::FromInt(2)));
+ __ bind(&next_loop);
+ __ mov(cached_map, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(receiver_map, FieldOperand(cached_map, WeakCell::kValueOffset));
+ __ j(not_equal, &prepare_next);
+ __ mov(handler, FieldOperand(feedback, counter, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ __ bind(&prepare_next);
+ __ add(counter, Immediate(Smi::FromInt(2)));
+ __ cmp(counter, FieldOperand(feedback, FixedArray::kLengthOffset));
+ __ j(less, &next_loop);
+
+ // We exhausted our array of map handler pairs.
+ __ pop(key);
+ __ pop(vector);
+ __ pop(receiver);
+ __ jmp(miss);
+
+ __ bind(&load_smi_map);
+ __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
+ __ jmp(&compare_map);
+}
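
HandleArrayCases scans a feedback FixedArray laid out as {weak cell of map, handler} pairs, stepping a smi counter by 2 per iteration, with smi receivers standing in as heap-number-map objects. A condensed model of the scan (types are stand-ins for the real heap objects):

#include <cstddef>
#include <vector>

struct FeedbackPair {
  const void* map_weak_cell;  // WeakCell holding a receiver map
  const void* handler;        // code handler for that map
};

// The polymorphic loop starts at element 2; elements 0/1 are the fast
// case already checked before entering the loop.
const void* FindHandler(const std::vector<FeedbackPair>& feedback,
                        const void* receiver_map) {
  for (size_t i = 0; i < feedback.size(); ++i) {  // add(counter, Smi(2))
    if (feedback[i].map_weak_cell == receiver_map) return feedback[i].handler;
  }
  return nullptr;  // exhausted the pairs: fall through to miss
}
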
+
+
+static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
+ Register key, Register vector, Register slot,
+ Register weak_cell, Label* miss) {
+ // feedback initially contains the feedback array
+ Label compare_smi_map;
+
+ // Move the weak map into the weak_cell register.
+ Register ic_map = weak_cell;
+ __ mov(ic_map, FieldOperand(weak_cell, WeakCell::kValueOffset));
+
+ // Receiver might not be a heap object.
+ __ JumpIfSmi(receiver, &compare_smi_map);
+ __ cmp(ic_map, FieldOperand(receiver, 0));
+ __ j(not_equal, miss);
+ Register handler = weak_cell;
+ __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+
+ // In microbenchmarks, it made sense to unroll this code so that the call to
+ // the handler is duplicated for a HeapObject receiver and a Smi receiver.
+ __ bind(&compare_smi_map);
+ __ CompareRoot(ic_map, Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, miss);
+ __ mov(handler, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ __ lea(handler, FieldOperand(handler, Code::kHeaderSize));
+ __ jmp(handler);
+}
+
+
+void VectorRawLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // edx
+ Register name = VectorLoadICDescriptor::NameRegister(); // ecx
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // ebx
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // eax
+ Register scratch = edi;
+ __ mov(scratch, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicCase(masm, receiver, name, vector, slot, scratch, &miss);
+
+ // Is it a fixed array?
+ __ bind(&try_array);
+ __ CompareRoot(FieldOperand(scratch, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+ HandleArrayCases(masm, receiver, name, vector, slot, scratch, true, &miss);
+
+ __ bind(&not_array);
+ __ CompareRoot(scratch, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &miss);
+ __ push(slot);
+ __ push(vector);
+ Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+ Code::ComputeHandlerFlags(Code::LOAD_IC));
+ masm->isolate()->stub_cache()->GenerateProbe(
+ masm, Code::LOAD_IC, code_flags, false, receiver, name, vector, scratch);
+ __ pop(vector);
+ __ pop(slot);
+
+ __ bind(&miss);
+ LoadIC::GenerateMiss(masm);
+}
+
+
+void VectorRawKeyedLoadStub::Generate(MacroAssembler* masm) {
+ GenerateImpl(masm, false);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateForTrampoline(MacroAssembler* masm) {
+ GenerateImpl(masm, true);
+}
+
+
+void VectorRawKeyedLoadStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
+ Register receiver = VectorLoadICDescriptor::ReceiverRegister(); // edx
+ Register key = VectorLoadICDescriptor::NameRegister(); // ecx
+ Register vector = VectorLoadICDescriptor::VectorRegister(); // ebx
+ Register slot = VectorLoadICDescriptor::SlotRegister(); // eax
+ Register feedback = edi;
+ __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ // Is it a weak cell?
+ Label try_array;
+ Label not_array, smi_key, key_okay, miss;
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kWeakCellMapRootIndex);
+ __ j(not_equal, &try_array);
+ HandleMonomorphicCase(masm, receiver, key, vector, slot, feedback, &miss);
+
+ __ bind(&try_array);
+ // Is it a fixed array?
+ __ CompareRoot(FieldOperand(feedback, 0), Heap::kFixedArrayMapRootIndex);
+ __ j(not_equal, &not_array);
+
+ // We have a polymorphic element handler.
+ Label polymorphic, try_poly_name;
+ __ bind(&polymorphic);
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, true, &miss);
+
+ __ bind(&not_array);
+ // Is it generic?
+ __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
+ __ j(not_equal, &try_poly_name);
+ Handle<Code> megamorphic_stub =
+ KeyedLoadIC::ChooseMegamorphicStub(masm->isolate());
+ __ jmp(megamorphic_stub, RelocInfo::CODE_TARGET);
+
+ __ bind(&try_poly_name);
+ // We might have a name in feedback, and a fixed array in the next slot.
+ __ cmp(key, feedback);
+ __ j(not_equal, &miss);
+ // If the name comparison succeeded, we know we have a fixed array with
+ // at least one map/handler pair.
+ __ mov(feedback, FieldOperand(vector, slot, times_half_pointer_size,
+ FixedArray::kHeaderSize + kPointerSize));
+ HandleArrayCases(masm, receiver, key, vector, slot, feedback, false, &miss);
+
+ __ bind(&miss);
+ KeyedLoadIC::GenerateMiss(masm);
}
@@ -4535,7 +4779,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ mov(eax, return_value_operand);
Label promote_scheduled_exception;
- Label exception_handled;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -4547,7 +4790,17 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Assert(above_equal, kInvalidHandleScopeLevel);
__ cmp(edi, Operand::StaticVariable(limit_address));
__ j(not_equal, &delete_allocated_handles);
+
+ // Leave the API exit frame.
__ bind(&leave_exit_frame);
+ bool restore_context = context_restore_operand != NULL;
+ if (restore_context) {
+ __ mov(esi, *context_restore_operand);
+ }
+ if (stack_space_operand != nullptr) {
+ __ mov(ebx, *stack_space_operand);
+ }
+ __ LeaveApiExitFrame(!restore_context);
// Check if the function scheduled an exception.
ExternalReference scheduled_exception_address =
@@ -4555,7 +4808,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ cmp(Operand::StaticVariable(scheduled_exception_address),
Immediate(isolate->factory()->the_hole_value()));
__ j(not_equal, &promote_scheduled_exception);
- __ bind(&exception_handled);
#if DEBUG
// Check if the function returned a valid JavaScript value.
@@ -4592,14 +4844,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&ok);
#endif
- bool restore_context = context_restore_operand != NULL;
- if (restore_context) {
- __ mov(esi, *context_restore_operand);
- }
- if (stack_space_operand != nullptr) {
- __ mov(ebx, *stack_space_operand);
- }
- __ LeaveApiExitFrame(!restore_context);
if (stack_space_operand != nullptr) {
DCHECK_EQ(0, stack_space);
__ pop(ecx);
@@ -4609,12 +4853,9 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ ret(stack_space * kPointerSize);
}
+ // Re-throw by promoting a scheduled exception.
__ bind(&promote_scheduled_exception);
- {
- FrameScope frame(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kPromoteScheduledException, 0);
- }
- __ jmp(&exception_handled);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
// HandleScope limit has changed. Delete allocated extensions.
ExternalReference delete_extensions =
diff --git a/deps/v8/src/x87/debug-x87.cc b/deps/v8/src/x87/debug-x87.cc
index cdbcbad966..0bbee14eaa 100644
--- a/deps/v8/src/x87/debug-x87.cc
+++ b/deps/v8/src/x87/debug-x87.cc
@@ -13,60 +13,60 @@
namespace v8 {
namespace internal {
-bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
-}
+// Patch the code at the current PC with a call to the target address.
+// Additional guard int3 instructions can be added if required.
+void PatchCodeWithCall(Address pc, Address target, int guard_bytes) {
+ // Call instruction takes up 5 bytes and int3 takes up one byte.
+ static const int kCallCodeSize = 5;
+ int code_size = kCallCodeSize + guard_bytes;
+ // Create a code patcher.
+ CodePatcher patcher(pc, code_size);
-// Patch the JS frame exit code with a debug break call. See
-// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x87.cc
-// for the precise return instructions sequence.
-void BreakLocationIterator::SetDebugBreakAtReturn() {
- DCHECK(Assembler::kJSReturnSequenceLength >=
- Assembler::kCallInstructionLength);
- rinfo()->PatchCodeWithCall(
- debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
- Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
-}
+// Add a label for checking the size of the code used for returning.
+#ifdef DEBUG
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
+#endif
+ // Patch the code.
+ patcher.masm()->call(target, RelocInfo::NONE32);
-// Restore the JS frame exit code.
-void BreakLocationIterator::ClearDebugBreakAtReturn() {
- rinfo()->PatchCode(original_rinfo()->pc(),
- Assembler::kJSReturnSequenceLength);
-}
+ // Check that the size of the code generated is as expected.
+ DCHECK_EQ(kCallCodeSize,
+ patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
+ // Add the requested number of int3 instructions after the call.
+ DCHECK_GE(guard_bytes, 0);
+ for (int i = 0; i < guard_bytes; i++) {
+ patcher.masm()->int3();
+ }
-// A debug break in the frame exit code is identified by the JS frame exit code
-// having been patched with a call instruction.
-bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
+ CpuFeatures::FlushICache(pc, code_size);
}
-bool BreakLocationIterator::IsDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- // Check whether the debug break slot instructions have been patched.
- return rinfo()->IsPatchedDebugBreakSlotSequence();
+// Patch the JS frame exit code with a debug break call. See
+// CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x87.cc
+// for the precise return instruction sequence.
+void BreakLocation::SetDebugBreakAtReturn() {
+ DCHECK(Assembler::kJSReturnSequenceLength >=
+ Assembler::kCallInstructionLength);
+ PatchCodeWithCall(
+ pc(), debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
+ Assembler::kJSReturnSequenceLength - Assembler::kCallInstructionLength);
}
-void BreakLocationIterator::SetDebugBreakAtSlot() {
+void BreakLocation::SetDebugBreakAtSlot() {
DCHECK(IsDebugBreakSlot());
Isolate* isolate = debug_info_->GetIsolate();
- rinfo()->PatchCodeWithCall(
- isolate->builtins()->Slot_DebugBreak()->entry(),
+ PatchCodeWithCall(
+ pc(), isolate->builtins()->Slot_DebugBreak()->entry(),
Assembler::kDebugBreakSlotLength - Assembler::kCallInstructionLength);
}
-void BreakLocationIterator::ClearDebugBreakAtSlot() {
- DCHECK(IsDebugBreakSlot());
- rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
-}
-
-
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
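PatchCodeWithCall above overwrites live code with a 5-byte near call followed by int3 guard bytes. The byte-level scheme can be sketched in freestanding C++ (assumed x86 encoding, writing into a plain buffer rather than through a CodePatcher):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Assumed x86 encoding: E8 <rel32> for a near call, CC for int3.
void PatchCodeWithCall(uint8_t* pc, const uint8_t* target, int guard_bytes) {
  static const int kCallCodeSize = 5;
  // rel32 is relative to the address of the *next* instruction.
  int32_t rel = static_cast<int32_t>(target - (pc + kCallCodeSize));
  pc[0] = 0xE8;
  std::memcpy(pc + 1, &rel, sizeof(rel));
  for (int i = 0; i < guard_bytes; i++) pc[kCallCodeSize + i] = 0xCC;
  // A real patcher would flush the instruction cache here.
}

int main() {
  uint8_t buf[80] = {0};
  PatchCodeWithCall(buf, buf + 64, 3);
  for (int i = 0; i < 8; i++) std::printf("%02x ", buf[i]);
  std::printf("\n");  // e8 3b 00 00 00 cc cc cc
}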
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index ffd2fa84ef..edc08abe6f 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -228,7 +228,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
#define __ masm()->
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
// Save all general purpose registers before messing with them.
diff --git a/deps/v8/src/x87/frames-x87.h b/deps/v8/src/x87/frames-x87.h
index 5b91baf385..e3876bc722 100644
--- a/deps/v8/src/x87/frames-x87.h
+++ b/deps/v8/src/x87/frames-x87.h
@@ -115,11 +115,6 @@ inline Object* JavaScriptFrame::function_slot_object() const {
}
-inline void StackHandler::SetFp(Address slot, Address fp) {
- Memory::Address_at(slot) = fp;
-}
-
-
} } // namespace v8::internal
#endif // V8_X87_FRAMES_X87_H_
diff --git a/deps/v8/src/x87/full-codegen-x87.cc b/deps/v8/src/x87/full-codegen-x87.cc
index 4ec21ae09c..59ff09f2db 100644
--- a/deps/v8/src/x87/full-codegen-x87.cc
+++ b/deps/v8/src/x87/full-codegen-x87.cc
@@ -95,7 +95,8 @@ class JumpPatchSite BASE_EMBEDDED {
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
- isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+ Handle<HandlerTable>::cast(isolate()->factory()->NewFixedArray(
+ HandlerTable::LengthForRange(function()->handler_count()), TENURED));
profiling_counter_ = isolate()->factory()->NewCell(
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
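The handler table is now allocated with HandlerTable::LengthForRange(n) slots and cast to a typed wrapper over a flat FixedArray. A rough model of that sizing scheme, assuming three slots per try-range (the real field set differs):

#include <cstdio>
#include <vector>

class HandlerTable {
 public:
  // Assumed layout: [start_pc, end_pc, handler_pc] per entry.
  static const int kRangeEntrySize = 3;
  static int LengthForRange(int entries) { return entries * kRangeEntrySize; }

  explicit HandlerTable(int entries) : slots_(LengthForRange(entries)) {}

  void SetRange(int index, int start, int end, int handler) {
    slots_[index * kRangeEntrySize + 0] = start;
    slots_[index * kRangeEntrySize + 1] = end;
    slots_[index * kRangeEntrySize + 2] = handler;
  }
  int HandlerFor(int pc) const {
    for (size_t i = 0; i < slots_.size(); i += kRangeEntrySize)
      if (slots_[i] <= pc && pc < slots_[i + 1]) return slots_[i + 2];
    return -1;
  }

 private:
  std::vector<int> slots_;  // stands in for the tenured FixedArray
};

int main() {
  HandlerTable table(2);
  table.SetRange(0, 10, 20, 100);
  table.SetRange(1, 30, 40, 200);
  std::printf("%d %d\n", table.HandlerFor(15), table.HandlerFor(35));  // 100 200
}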
@@ -188,7 +189,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in edi.
- if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+ if (info->scope()->is_script_scope()) {
__ push(edi);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext, 2);
@@ -233,6 +234,11 @@ void FullCodeGenerator::Generate() {
}
}
+ ArgumentsAccessStub::HasNewTarget has_new_target =
+ IsSubclassConstructor(info->function()->kind())
+ ? ArgumentsAccessStub::HAS_NEW_TARGET
+ : ArgumentsAccessStub::NO_NEW_TARGET;
+
// Possibly allocate RestParameters
int rest_index;
Variable* rest_param = scope()->rest_parameter(&rest_index);
@@ -241,6 +247,11 @@ void FullCodeGenerator::Generate() {
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
+ if (has_new_target == ArgumentsAccessStub::HAS_NEW_TARGET) {
+ --num_parameters;
+ ++rest_index;
+ }
+
__ lea(edx,
Operand(ebp, StandardFrameConstants::kCallerSPOffset + offset));
__ push(edx);
@@ -281,10 +292,7 @@ void FullCodeGenerator::Generate() {
} else {
type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
}
- ArgumentsAccessStub::HasNewTarget has_new_target =
- IsSubclassConstructor(info->function()->kind())
- ? ArgumentsAccessStub::HAS_NEW_TARGET
- : ArgumentsAccessStub::NO_NEW_TARGET;
+
ArgumentsAccessStub stub(isolate(), type, has_new_target);
__ CallStub(&stub);
@@ -1442,7 +1450,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Immediate(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
- CallLoadIC(CONTEXTUAL);
+ CallGlobalLoadIC(var->name());
context()->Plug(eax);
break;
}
@@ -2101,7 +2109,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ mov(load_name, isolate()->factory()->throw_string()); // "throw"
__ push(load_name); // "throw"
__ push(Operand(esp, 2 * kPointerSize)); // iter
@@ -2113,16 +2120,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// re-boxing.
__ bind(&l_try);
__ pop(eax); // result
- __ PushTryHandler(StackHandler::CATCH, expr->index());
- const int handler_size = StackHandlerConstants::kSize;
+ EnterTryBlock(expr->index(), &l_catch);
+ const int try_block_size = TryCatch::kElementCount * kPointerSize;
__ push(eax); // result
__ jmp(&l_suspend);
__ bind(&l_continuation);
__ jmp(&l_resume);
__ bind(&l_suspend);
- const int generator_object_depth = kPointerSize + handler_size;
+ const int generator_object_depth = kPointerSize + try_block_size;
__ mov(eax, Operand(esp, generator_object_depth));
__ push(eax); // g
+ __ push(Immediate(Smi::FromInt(expr->index()))); // handler-index
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
__ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
Immediate(Smi::FromInt(l_continuation.pos())));
@@ -2130,13 +2138,13 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(ecx, esi);
__ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
kDontSaveFPRegs);
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 2);
__ mov(context_register(),
Operand(ebp, StandardFrameConstants::kContextOffset));
__ pop(eax); // result
EmitReturnSequence();
__ bind(&l_resume); // received in eax
- __ PopTryHandler();
+ ExitTryBlock(expr->index());
// receiver = iter; f = iter.next; arg = received;
__ bind(&l_next);
@@ -2482,6 +2490,16 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
__ push(Operand(esp, 0)); // prototype
}
EmitPropertyKey(property, lit->GetIdForProperty(i));
+
+ // The static prototype property is read-only. The parser already handles
+ // the non-computed property name case, so this is the only place where an
+ // own read-only property must be checked for; it is special-cased here to
+ // avoid doing the check for every property.
+ if (property->is_static() && property->is_computed_name()) {
+ __ CallRuntime(Runtime::kThrowIfStaticPrototype, 1);
+ __ push(eax);
+ }
+
VisitForStackValue(value);
EmitSetHomeObjectIfNeeded(value, 2);
@@ -2619,25 +2637,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
- } else if (op == Token::INIT_CONST_LEGACY) {
- // Const initializers need a write barrier.
- DCHECK(!var->IsParameter()); // No const parameters.
- if (var->IsLookupSlot()) {
- __ push(eax);
- __ push(esi);
- __ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
- } else {
- DCHECK(var->IsStackLocal() || var->IsContextSlot());
- Label skip;
- MemOperand location = VarOperand(var, ecx);
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ j(not_equal, &skip, Label::kNear);
- EmitStoreToStackLocalOrContextSlot(var, location);
- __ bind(&skip);
- }
-
} else if (var->mode() == LET && op != Token::INIT_LET) {
// Non-initializing assignment to let variable needs a write barrier.
DCHECK(!var->IsLookupSlot());
@@ -2651,6 +2650,21 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ CallRuntime(Runtime::kThrowReferenceError, 1);
__ bind(&assign);
EmitStoreToStackLocalOrContextSlot(var, location);
+
+ } else if (var->mode() == CONST && op != Token::INIT_CONST) {
+ // Assignment to a const variable always throws; the hole check below
+ DCHECK(!var->IsLookupSlot());
+ DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+ Label const_error;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &const_error, Label::kNear);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kThrowReferenceError, 1);
+ __ bind(&const_error);
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
@@ -2672,8 +2686,33 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
EmitStoreToStackLocalOrContextSlot(var, location);
}
- } else if (IsSignallingAssignmentToConst(var, op, language_mode())) {
- __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+
+ } else if (op == Token::INIT_CONST_LEGACY) {
+ // Const initializers need a write barrier.
+ DCHECK(var->mode() == CONST_LEGACY);
+ DCHECK(!var->IsParameter()); // No const parameters.
+ if (var->IsLookupSlot()) {
+ __ push(eax);
+ __ push(esi);
+ __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+ } else {
+ DCHECK(var->IsStackLocal() || var->IsContextSlot());
+ Label skip;
+ MemOperand location = VarOperand(var, ecx);
+ __ mov(edx, location);
+ __ cmp(edx, isolate()->factory()->the_hole_value());
+ __ j(not_equal, &skip, Label::kNear);
+ EmitStoreToStackLocalOrContextSlot(var, location);
+ __ bind(&skip);
+ }
+
+ } else {
+ DCHECK(var->mode() == CONST_LEGACY && op != Token::INIT_CONST_LEGACY);
+ if (is_strict(language_mode())) {
+ __ CallRuntime(Runtime::kThrowConstAssignError, 0);
+ }
+ // Silently ignore store in sloppy mode.
}
}
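The reordered branches above encode the ES6 binding rules: let/const slots hold the-hole until initialized, const assignments always throw, and sloppy-mode stores to legacy consts are silently ignored. A behavioral model in plain C++ (a sketch of the semantics, not of the generated code):

#include <cstdio>
#include <stdexcept>

enum Mode { LET, CONST, CONST_LEGACY };
const int kTheHole = -1;  // sentinel, like the_hole_value()

struct Slot {
  Mode mode;
  int value = kTheHole;
};

void Assign(Slot& slot, int value, bool is_init, bool strict) {
  if (slot.mode == LET && !is_init) {
    if (slot.value == kTheHole) throw std::runtime_error("ReferenceError");
    slot.value = value;
  } else if (slot.mode == CONST && !is_init) {
    if (slot.value == kTheHole) throw std::runtime_error("ReferenceError");
    throw std::runtime_error("TypeError: assignment to constant");
  } else if (slot.mode == CONST_LEGACY && !is_init) {
    if (strict) throw std::runtime_error("TypeError: assignment to constant");
    // Sloppy mode: silently ignore the store.
  } else {
    slot.value = value;  // initialization or plain var store
  }
}

int main() {
  Slot c{CONST};
  Assign(c, 1, /*is_init=*/true, /*strict=*/true);
  try { Assign(c, 2, false, true); }
  catch (const std::exception& e) { std::printf("%s\n", e.what()); }
}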
@@ -3141,8 +3180,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
- if (!ValidateSuperCall(expr)) return;
-
Variable* new_target_var = scope()->DeclarationScope()->new_target_var();
GetVar(eax, new_target_var);
__ push(eax);
@@ -3654,8 +3691,8 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ mov(eax, FieldOperand(eax, Map::kConstructorOffset));
- __ CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
+ __ GetMapConstructor(eax, eax, ebx);
+ __ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
__ j(not_equal, &non_function_constructor);
// eax now contains the constructor function. Grab the
@@ -3957,7 +3994,7 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4005,7 +4042,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
__ jmp(&done);
NopRuntimeCallHelper call_helper;
- generator.GenerateSlow(masm_, call_helper);
+ generator.GenerateSlow(masm_, NOT_PART_OF_IC_HANDLER, call_helper);
__ bind(&done);
context()->Plug(result);
@@ -4177,7 +4214,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
// Call runtime to perform the lookup.
__ push(cache);
__ push(key);
- __ CallRuntime(Runtime::kGetFromCache, 2);
+ __ CallRuntime(Runtime::kGetFromCacheRT, 2);
__ bind(&done);
context()->Plug(eax);
@@ -4497,17 +4534,11 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
- if (expr->function() != NULL &&
- expr->function()->intrinsic_type == Runtime::INLINE) {
- Comment cmnt(masm_, "[ InlineRuntimeCall");
- EmitInlineRuntimeCall(expr);
- return;
- }
-
- Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments();
+ int arg_count = args->length();
if (expr->is_jsruntime()) {
+ Comment cmnt(masm_, "[ CallRuntime");
// Push the builtins object as receiver.
__ mov(eax, GlobalObjectOperand());
__ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
@@ -4527,9 +4558,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
__ push(Operand(esp, 0));
__ mov(Operand(esp, kPointerSize), eax);
- // Code common for calls using the IC.
- ZoneList<Expression*>* args = expr->arguments();
- int arg_count = args->length();
+ // Push the arguments ("left-to-right").
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@@ -4544,16 +4573,27 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
context()->DropAndPlug(1, eax);
} else {
- // Push the arguments ("left-to-right").
- int arg_count = args->length();
- for (int i = 0; i < arg_count; i++) {
- VisitForStackValue(args->at(i));
- }
-
- // Call the C runtime function.
- __ CallRuntime(expr->function(), arg_count);
+ const Runtime::Function* function = expr->function();
+ switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+ case Runtime::kInline##Name: { \
+ Comment cmnt(masm_, "[ Inline" #Name); \
+ return Emit##Name(expr); \
+ }
+ FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+ default: {
+ Comment cmnt(masm_, "[ CallRuntime for unhandled intrinsic");
+ // Push the arguments ("left-to-right").
+ for (int i = 0; i < arg_count; i++) {
+ VisitForStackValue(args->at(i));
+ }
- context()->Plug(eax);
+ // Call the C runtime function.
+ __ CallRuntime(expr->function(), arg_count);
+ context()->Plug(eax);
+ }
+ }
}
}
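The new dispatch folds inline intrinsics into VisitCallRuntime via an X-macro: one list expands into both the case labels and the Emit* calls, so adding an intrinsic touches a single place. A self-contained illustration of the pattern (the intrinsic names below are made up):

#include <cstdio>

#define FOR_EACH_FULL_CODE_INTRINSIC(V) \
  V(IsSmi)                              \
  V(StringCharCodeAt)

enum FunctionId {
#define DECLARE_ID(Name) kInline##Name,
  FOR_EACH_FULL_CODE_INTRINSIC(DECLARE_ID)
#undef DECLARE_ID
  kUnhandled
};

void EmitIsSmi() { std::puts("inline IsSmi"); }
void EmitStringCharCodeAt() { std::puts("inline StringCharCodeAt"); }

void VisitCallRuntime(FunctionId id) {
  switch (id) {
#define CALL_INTRINSIC_GENERATOR(Name) \
  case kInline##Name:                  \
    return Emit##Name();
    FOR_EACH_FULL_CODE_INTRINSIC(CALL_INTRINSIC_GENERATOR)
#undef CALL_INTRINSIC_GENERATOR
    default:
      std::puts("generic runtime call");
  }
}

int main() {
  VisitCallRuntime(kInlineIsSmi);
  VisitCallRuntime(kUnhandled);
}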
@@ -5205,17 +5245,6 @@ void FullCodeGenerator::EnterFinallyBlock() {
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(edx, Operand::StaticVariable(pending_message_obj));
__ push(edx);
-
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(edx, Operand::StaticVariable(has_pending_message));
- __ SmiTag(edx);
- __ push(edx);
-
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(edx, Operand::StaticVariable(pending_message_script));
- __ push(edx);
}
@@ -5223,17 +5252,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
DCHECK(!result_register().is(edx));
// Restore pending message from stack.
__ pop(edx);
- ExternalReference pending_message_script =
- ExternalReference::address_of_pending_message_script(isolate());
- __ mov(Operand::StaticVariable(pending_message_script), edx);
-
- __ pop(edx);
- __ SmiUntag(edx);
- ExternalReference has_pending_message =
- ExternalReference::address_of_has_pending_message(isolate());
- __ mov(Operand::StaticVariable(has_pending_message), edx);
-
- __ pop(edx);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(Operand::StaticVariable(pending_message_obj), edx);
@@ -5251,33 +5269,6 @@ void FullCodeGenerator::ExitFinallyBlock() {
#undef __
-#define __ ACCESS_MASM(masm())
-
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
- // The macros used here must preserve the result register.
-
- // Because the handler block contains the context of the finally
- // code, we can restore it directly from there for the finally code
- // rather than iteratively unwinding contexts via their previous
- // links.
- __ Drop(*stack_depth); // Down to the handler block.
- if (*context_length > 0) {
- // Restore the context to its dedicated register and the stack.
- __ mov(esi, Operand(esp, StackHandlerConstants::kContextOffset));
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- }
- __ PopTryHandler();
- __ call(finally_entry_);
-
- *stack_depth = 0;
- *context_length = 0;
- return previous_;
-}
-
-#undef __
-
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x11;
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index c9ef8d1183..506ed47fac 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -228,6 +228,12 @@ void InternalArrayConstructorDescriptor::Initialize(
}
+void CompareDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+ Register registers[] = {esi, edx, eax};
+ data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {esi, eax};
data->Initialize(arraysize(registers), registers, NULL);
diff --git a/deps/v8/src/x87/lithium-codegen-x87.cc b/deps/v8/src/x87/lithium-codegen-x87.cc
index 07be757ed5..05944f9f49 100644
--- a/deps/v8/src/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/x87/lithium-codegen-x87.cc
@@ -10,6 +10,7 @@
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
+#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
@@ -109,7 +110,7 @@ bool LCodeGen::GeneratePrologue() {
// Sloppy mode functions and builtins need to replace the receiver with the
// global proxy when called as functions (without an explicit receiver
// object).
- if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
+ if (graph()->this_has_uses() && is_sloppy(info_->language_mode()) &&
!info_->is_native()) {
Label ok;
// +1 for return address.
@@ -373,10 +374,11 @@ void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
bool LCodeGen::GenerateJumpTable() {
+ if (!jump_table_.length()) return !is_aborted();
+
Label needs_frame;
- if (jump_table_.length() > 0) {
- Comment(";;; -------------------- Jump table --------------------");
- }
+ Comment(";;; -------------------- Jump table --------------------");
+
for (int i = 0; i < jump_table_.length(); i++) {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
@@ -385,33 +387,57 @@ bool LCodeGen::GenerateJumpTable() {
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- if (needs_frame.is_bound()) {
- __ jmp(&needs_frame);
- } else {
- __ bind(&needs_frame);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- DCHECK(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push a PC inside the function so that the deopt code can find where
- // the deopt comes from. It doesn't have to be the precise return
- // address of a "calling" LAZY deopt, it only has to be somewhere
- // inside the code body.
- Label push_approx_pc;
- __ call(&push_approx_pc);
- __ bind(&push_approx_pc);
- // Push the continuation which was stashed where the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 3 * kPointerSize));
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
+ __ call(&needs_frame);
} else {
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
+ info()->LogDeoptCallPosition(masm()->pc_offset(),
+ table_entry->deopt_info.inlining_id);
+ }
+ if (needs_frame.is_linked()) {
+ __ bind(&needs_frame);
+
+ /* stack layout
+ 4: entry address
+ 3: return address <-- esp
+ 2: garbage
+ 1: garbage
+ 0: garbage
+ */
+ __ sub(esp, Immediate(kPointerSize)); // Reserve space for stub marker.
+ __ push(MemOperand(esp, kPointerSize)); // Copy return address.
+ __ push(MemOperand(esp, 3 * kPointerSize)); // Copy entry address.
+
+ /* stack layout
+ 4: entry address
+ 3: return address
+ 2: garbage
+ 1: return address
+ 0: entry address <-- esp
+ */
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp); // Save ebp.
+
+ // Copy context.
+ __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+ // Fill ebp with the right stack frame address.
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ DCHECK(info()->IsStub());
+ __ mov(MemOperand(esp, 2 * kPointerSize),
+ Immediate(Smi::FromInt(StackFrame::STUB)));
+
+ /* stack layout
+ 4: old ebp
+ 3: context pointer
+ 2: stub marker
+ 1: return address
+ 0: entry address <-- esp
+ */
+ __ ret(0); // Call the continuation without clobbering registers.
}
return !is_aborted();
}
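The shared needs_frame trampoline trades per-entry frame-building code for one stub that rewrites the stack in place. The slot shuffling in the layout comments can be replayed with an array standing in for the stack (a model, not assembler):

#include <cstdio>

int main() {
  // Slots numbered as in the layout comments; "esp" indexes the top slot.
  long slot[5] = {0, 0, 0, 0x2222, 0x1111};  // 4: entry addr, 3: return addr
  int esp = 3;
  long ebp = 0x3333, context = 0x4444, kStubMarker = 0x5555;

  esp -= 1;                             // sub esp, kPointerSize (marker slot)
  esp -= 1; slot[esp] = slot[esp + 2];  // push [esp + k]: copy return address
  esp -= 1; slot[esp] = slot[esp + 4];  // push [esp + 3k]: copy entry address
  slot[esp + 4] = ebp;                  // save ebp where the entry addr was
  slot[esp + 3] = context;              // copy context over the old slot
  slot[esp + 2] = kStubMarker;          // stub marker instead of a function

  // 4: old ebp, 3: context, 2: marker, 1: return addr, 0: entry addr
  for (int i = 4; i >= 0; i--) std::printf("%d: %lx\n", i, slot[esp + i]);
}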
@@ -1143,12 +1169,13 @@ void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
__ bind(&done);
}
- Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
DCHECK(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
+ info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
@@ -2854,9 +2881,9 @@ void LCodeGen::EmitClassOfTest(Label* is_true,
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
- __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
+ __ GetMapConstructor(temp, temp, temp2);
// Objects with a non-function constructor have class 'Object'.
- __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
+ __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ j(not_equal, is_true);
} else {
@@ -3112,16 +3139,6 @@ void LCodeGen::DoReturn(LReturn* instr) {
}
-void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
- Register result = ToRegister(instr->result());
- __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
- }
-}
-
-
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
DCHECK(FLAG_vector_ics);
@@ -3151,30 +3168,12 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
}
ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
- Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(isolate(), mode,
+ PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
-void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
- Register value = ToRegister(instr->value());
- Handle<PropertyCell> cell_handle = instr->hydrogen()->cell().handle();
-
- // If the cell we are storing to contains the hole it could have
- // been deleted from the property dictionary. In that case, we need
- // to update the property details in the property dictionary to mark
- // it as no longer deleted. We deoptimize in that case.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
- DeoptimizeIf(equal, instr, Deoptimizer::kHole);
- }
-
- // Store the value.
- __ mov(Operand::ForCell(cell_handle), value);
- // Cells are always rescanned, so no write barrier here.
-}
-
-
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
@@ -3281,8 +3280,9 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
if (FLAG_vector_ics) {
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
}
- Handle<Code> ic =
- CodeFactory::LoadICInOptimizedCode(isolate(), NOT_CONTEXTUAL).code();
+ Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
+ isolate(), NOT_CONTEXTUAL,
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -3505,7 +3505,9 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
- Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(isolate()).code();
+ Handle<Code> ic =
+ CodeFactory::KeyedLoadICInOptimizedCode(
+ isolate(), instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4099,7 +4101,7 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ push(temp_result);
- __ CallRuntimeSaveDoubles(Runtime::kMathSqrtRT);
+ __ CallRuntimeSaveDoubles(Runtime::kMathSqrt);
RecordSafepointWithRegisters(instr->pointer_map(), 1,
Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(temp_result, eax);
@@ -4263,14 +4265,8 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
void LCodeGen::DoMathClz32(LMathClz32* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
- Label not_zero_input;
- __ bsr(result, input);
- __ j(not_zero, &not_zero_input);
- __ Move(result, Immediate(63)); // 63^31 == 32
-
- __ bind(&not_zero_input);
- __ xor_(result, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+ __ Lzcnt(result, input);
}
@@ -4570,7 +4566,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
__ mov(StoreDescriptor::NameRegister(), instr->name());
- Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
+ Handle<Code> ic =
+ StoreIC::initialize_stub(isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state());
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
@@ -4800,8 +4798,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
- Handle<Code> ic =
- CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
+ Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+ isolate(), instr->language_mode(),
+ instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
diff --git a/deps/v8/src/x87/lithium-x87.cc b/deps/v8/src/x87/lithium-x87.cc
index eeccd1bcff..009d47029c 100644
--- a/deps/v8/src/x87/lithium-x87.cc
+++ b/deps/v8/src/x87/lithium-x87.cc
@@ -2130,14 +2130,6 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
}
-LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
- LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
- return instr->RequiresHoleCheck()
- ? AssignEnvironment(DefineAsRegister(result))
- : DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* global_object =
@@ -2153,13 +2145,6 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
- LStoreGlobalCell* result =
- new(zone()) LStoreGlobalCell(UseRegister(instr->value()));
- return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
-}
-
-
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
LInstruction* result =
diff --git a/deps/v8/src/x87/lithium-x87.h b/deps/v8/src/x87/lithium-x87.h
index ccd197fc4b..becd4f2a7a 100644
--- a/deps/v8/src/x87/lithium-x87.h
+++ b/deps/v8/src/x87/lithium-x87.h
@@ -104,7 +104,6 @@ class LCodeGen;
V(LoadContextSlot) \
V(LoadFieldByIndex) \
V(LoadFunctionPrototype) \
- V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyed) \
V(LoadKeyedGeneric) \
@@ -145,7 +144,6 @@ class LCodeGen;
V(StoreCodeEntry) \
V(StoreContextSlot) \
V(StoreFrameContext) \
- V(StoreGlobalCell) \
V(StoreKeyed) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
@@ -1726,13 +1724,6 @@ class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
};
-class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
-};
-
-
class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
public:
LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
@@ -1754,19 +1745,6 @@ class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
};
-class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 0> {
- public:
- explicit LStoreGlobalCell(LOperand* value) {
- inputs_[0] = value;
- }
-
- LOperand* value() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
- DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
-};
-
-
class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadContextSlot(LOperand* context) {
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index 41b93d9239..f6541b89e5 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -14,7 +14,6 @@
#include "src/debug.h"
#include "src/isolate-inl.h"
#include "src/runtime/runtime.h"
-#include "src/serialize.h"
namespace v8 {
namespace internal {
@@ -987,44 +986,21 @@ void MacroAssembler::LeaveApiExitFrame(bool restore_context) {
}
-void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
- int handler_index) {
+void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // We will build up the handler from the bottom by pushing on the stack.
- // First push the frame pointer and context.
- if (kind == StackHandler::JS_ENTRY) {
- // The frame pointer does not point to a JS frame so we save NULL for
- // ebp. We expect the code throwing an exception to check ebp before
- // dereferencing it to restore the context.
- push(Immediate(0)); // NULL frame pointer.
- push(Immediate(Smi::FromInt(0))); // No context.
- } else {
- push(ebp);
- push(esi);
- }
- // Push the state and the code object.
- unsigned state =
- StackHandler::IndexField::encode(handler_index) |
- StackHandler::KindField::encode(kind);
- push(Immediate(state));
- Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
push(Operand::StaticVariable(handler_address));
+
// Set this new handler as the current one.
mov(Operand::StaticVariable(handler_address), esp);
}
-void MacroAssembler::PopTryHandler() {
+void MacroAssembler::PopStackHandler() {
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
pop(Operand::StaticVariable(handler_address));
@@ -1032,103 +1008,6 @@ void MacroAssembler::PopTryHandler() {
}
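With this change a stack handler shrinks from five words to a single next pointer on the machine stack, linked through a per-isolate head slot. A simplified model of the push/pop pair (globals stand in for the isolate slot and esp; pointers are assumed to fit in a long):

#include <cstdio>

long g_stack[64];
int g_sp = 64;                      // simulated esp (grows down)
long* g_handler_address = nullptr;  // stands in for Isolate::kHandlerAddress

void PushStackHandler() {
  g_stack[--g_sp] = reinterpret_cast<long>(g_handler_address);  // link old head
  g_handler_address = &g_stack[g_sp];  // new head = esp
}

void PopStackHandler() {
  g_handler_address = reinterpret_cast<long*>(g_stack[g_sp++]);  // unlink
}

int main() {
  PushStackHandler();
  PushStackHandler();
  std::printf("innermost at slot %d\n", (int)(g_handler_address - g_stack));
  PopStackHandler();
  PopStackHandler();
  std::printf("chain empty: %d\n", g_handler_address == nullptr);
}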
-void MacroAssembler::JumpToHandlerEntry() {
- // Compute the handler entry address and jump to it. The handler table is
- // a fixed array of (smi-tagged) code offsets.
- // eax = exception, edi = code object, edx = state.
- mov(ebx, FieldOperand(edi, Code::kHandlerTableOffset));
- shr(edx, StackHandler::kKindWidth);
- mov(edx, FieldOperand(ebx, edx, times_4, FixedArray::kHeaderSize));
- SmiUntag(edx);
- lea(edi, FieldOperand(edi, edx, times_1, Code::kHeaderSize));
- jmp(edi);
-}
-
-
-void MacroAssembler::Throw(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in eax.
- if (!value.is(eax)) {
- mov(eax, value);
- }
- // Drop the stack pointer to the top of the top handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- mov(esp, Operand::StaticVariable(handler_address));
- // Restore the next handler.
- pop(Operand::StaticVariable(handler_address));
-
- // Remove the code object and state, compute the handler address in edi.
- pop(edi); // Code object.
- pop(edx); // Index and state.
-
- // Restore the context and frame pointer.
- pop(esi); // Context.
- pop(ebp); // Frame pointer.
-
- // If the handler is a JS frame, restore the context to the frame.
- // (kind == ENTRY) == (ebp == 0) == (esi == 0), so we could test either
- // ebp or esi.
- Label skip;
- test(esi, esi);
- j(zero, &skip, Label::kNear);
- mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
- bind(&skip);
-
- JumpToHandlerEntry();
-}
-
-
-void MacroAssembler::ThrowUncatchable(Register value) {
- // Adjust this code if not the case.
- STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
- STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
- STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
-
- // The exception is expected in eax.
- if (!value.is(eax)) {
- mov(eax, value);
- }
- // Drop the stack pointer to the top of the top stack handler.
- ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
- mov(esp, Operand::StaticVariable(handler_address));
-
- // Unwind the handlers until the top ENTRY handler is found.
- Label fetch_next, check_kind;
- jmp(&check_kind, Label::kNear);
- bind(&fetch_next);
- mov(esp, Operand(esp, StackHandlerConstants::kNextOffset));
-
- bind(&check_kind);
- STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
- test(Operand(esp, StackHandlerConstants::kStateOffset),
- Immediate(StackHandler::KindField::kMask));
- j(not_zero, &fetch_next);
-
- // Set the top handler address to next handler past the top ENTRY handler.
- pop(Operand::StaticVariable(handler_address));
-
- // Remove the code object and state, compute the handler address in edi.
- pop(edi); // Code object.
- pop(edx); // Index and state.
-
- // Clear the context pointer and frame pointer (0 was saved in the handler).
- pop(esi);
- pop(ebp);
-
- JumpToHandlerEntry();
-}
-
-
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
Register scratch1,
Register scratch2,
@@ -1883,6 +1762,20 @@ void MacroAssembler::NegativeZeroTest(Register result,
}
+void MacroAssembler::GetMapConstructor(Register result, Register map,
+ Register temp) {
+ Label done, loop;
+ mov(result, FieldOperand(map, Map::kConstructorOrBackPointerOffset));
+ bind(&loop);
+ JumpIfSmi(result, &done);
+ CmpObjectType(result, MAP_TYPE, temp);
+ j(not_equal, &done);
+ mov(result, FieldOperand(result, Map::kConstructorOrBackPointerOffset));
+ jmp(&loop);
+ bind(&done);
+}
+
+
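GetMapConstructor loops because Map::kConstructorOrBackPointerOffset overloads one field: transitioned maps store a back pointer to their parent map there, and only the root of the chain holds the real constructor. A toy version of the walk (types invented for illustration; the smi fast path is omitted):

#include <cstdio>

struct Object { bool is_map; };
struct Map : Object {
  Object* constructor_or_back_pointer;
  Map(Object* c) : Object{true}, constructor_or_back_pointer(c) {}
};

Object* GetMapConstructor(Map* map) {
  Object* result = map->constructor_or_back_pointer;
  while (result->is_map)  // keep following back pointers
    result = static_cast<Map*>(result)->constructor_or_back_pointer;
  return result;
}

int main() {
  Object function{false};  // the actual constructor function
  Map root(&function);
  Map transition(&root);   // back pointer to its parent map
  std::printf("found constructor: %d\n",
              GetMapConstructor(&transition) == &function);
}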
void MacroAssembler::TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
@@ -1934,7 +1827,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
// Non-instance prototype: Fetch prototype from constructor field
// in initial map.
bind(&non_instance);
- mov(result, FieldOperand(result, Map::kConstructorOffset));
+ GetMapConstructor(result, result, scratch);
}
// All done.
@@ -2448,6 +2341,17 @@ void MacroAssembler::Move(const Operand& dst, const Immediate& x) {
}
+void MacroAssembler::Lzcnt(Register dst, const Operand& src) {
+ // TODO(intel): Add support for LZCNT (with ABM/BMI1).
+ Label not_zero_src;
+ bsr(dst, src);
+ j(not_zero, &not_zero_src, Label::kNear);
+ Move(dst, Immediate(63)); // 63^31 == 32
+ bind(&not_zero_src);
+ xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
+}
+
+
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
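The Lzcnt helper above emulates a leading-zero count with BSR. The arithmetic behind the two magic XORs, checked in portable C++ (the loop-based Bsr is only a stand-in for the instruction):

#include <cstdint>
#include <cstdio>

uint32_t Bsr(uint32_t x) {  // index of the highest set bit; x != 0
  uint32_t i = 0;
  while (x >>= 1) ++i;
  return i;
}

uint32_t Lzcnt(uint32_t x) {
  uint32_t r = (x == 0) ? 63 : Bsr(x);  // 63, so that 63 ^ 31 == 32
  return r ^ 31;  // for bsr in [0..31], 31 ^ bsr == 31 - bsr
}

int main() {
  std::printf("%u %u %u\n", Lzcnt(0), Lzcnt(1), Lzcnt(0x80000000u));  // 32 31 0
}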
diff --git a/deps/v8/src/x87/macro-assembler-x87.h b/deps/v8/src/x87/macro-assembler-x87.h
index c25203f2d1..c392598c91 100644
--- a/deps/v8/src/x87/macro-assembler-x87.h
+++ b/deps/v8/src/x87/macro-assembler-x87.h
@@ -535,17 +535,11 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// Exception handling
- // Push a new try handler and link it into try handler chain.
- void PushTryHandler(StackHandler::Kind kind, int handler_index);
+ // Push a new stack handler and link it into stack handler chain.
+ void PushStackHandler();
- // Unlink the stack handler on top of the stack from the try handler chain.
- void PopTryHandler();
-
- // Throw to the top handler in the try handler chain.
- void Throw(Register value);
-
- // Throw past all JS frames to the top JS entry frame.
- void ThrowUncatchable(Register value);
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ void PopStackHandler();
// ---------------------------------------------------------------------------
// Inline caching support
@@ -683,6 +677,10 @@ class MacroAssembler: public Assembler {
void NegativeZeroTest(Register result, Register op1, Register op2,
Register scratch, Label* then_label);
+ // Machine code version of Map::GetConstructor().
+ // |temp| holds |result|'s map when done.
+ void GetMapConstructor(Register result, Register map, Register temp);
+
// Try to get the prototype of a function and put the value in the
// result register. Checks that the function really is a function and
// jumps to the miss label if the fast checks fail. The
@@ -778,6 +776,9 @@ class MacroAssembler: public Assembler {
void Push(Register src) { push(src); }
void Pop(Register dst) { pop(dst); }
+ void Lzcnt(Register dst, Register src) { Lzcnt(dst, Operand(src)); }
+ void Lzcnt(Register dst, const Operand& src);
+
// Emit call to the code we are currently generating.
void CallSelf() {
Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
@@ -966,10 +967,6 @@ class MacroAssembler: public Assembler {
Register bitmap_reg,
Register mask_reg);
- // Helper for throwing exceptions. Compute a handler address and jump to
- // it. See the implementation for register usage.
- void JumpToHandlerEntry();
-
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 1b851e44cf..60536a4420 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -86,6 +86,7 @@
'compiler/test-run-machops.cc',
'compiler/test-run-properties.cc',
'compiler/test-run-stackcheck.cc',
+ 'compiler/test-run-stubs.cc',
'compiler/test-run-variables.cc',
'compiler/test-simplified-lowering.cc',
'cctest.cc',
@@ -99,6 +100,7 @@
'test-api.cc',
'test-api.h',
'test-api-interceptors.cc',
+ 'test-array-list.cc',
'test-ast.cc',
'test-atomicops.cc',
'test-bignum.cc',
@@ -156,6 +158,7 @@
'test-thread-termination.cc',
'test-threads.cc',
'test-transitions.cc',
+ 'test-typedarrays.cc',
'test-types.cc',
'test-unbound-queue.cc',
'test-unboxed-doubles.cc',
@@ -270,6 +273,9 @@
},
},
}],
+ ['OS=="aix"', {
+ 'ldflags': [ '-Wl,-bbigtoc' ],
+ }],
['component=="shared_library"', {
# cctest can't be built against a shared library, so we need to
# depend on the underlying static target in that case.
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index e111438cdf..08877c8268 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -558,6 +558,9 @@ static inline void SimulateIncrementalMarking(i::Heap* heap) {
CHECK(marking->IsMarking());
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (marking->IsReadyToOverApproximateWeakClosure()) {
+ marking->MarkObjectGroups();
+ }
}
CHECK(marking->IsComplete());
}
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index c58b1f0204..d7f72495ae 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -64,9 +64,6 @@
# are actually 13 * 38 * 5 * 128 = 316160 individual tests hidden here.
'test-parsing/ParserSync': [PASS, NO_VARIANTS],
- # Modules are busted
- 'test-parsing/ExportsMaybeAssigned': [SKIP],
-
# This tests only the type system, so there is no point in running several
# variants.
'test-hydrogen-types/*': [PASS, NO_VARIANTS],
@@ -75,13 +72,25 @@
# The cpu profiler tests are notoriously flaky.
# BUG(2999). (test/cpu-profiler/CollectCpuProfile)
# BUG(3287). (test-cpu-profiler/SampleWhenFrameIsNotSetup)
- 'test-cpu-profiler/*': [PASS, FLAKY],
- 'test-cpu-profiler/*': [SKIP],
+ 'test-cpu-profiler/CollectCpuProfile': [SKIP],
+ 'test-cpu-profiler/CollectCpuProfileSamples': [SKIP],
+ 'test-cpu-profiler/FunctionApplySample': [SKIP],
+ 'test-cpu-profiler/FunctionCallSample': [SKIP],
+ 'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
+ 'test-cpu-profiler/HotDeoptNoFrameEntry': [SKIP],
+ 'test-cpu-profiler/BoundFunctionCall': [SKIP],
+ 'test-cpu-profiler/CpuProfileDeepStack': [SKIP],
+ 'test-cpu-profiler/JsNativeJsSample': [SKIP],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSample': [SKIP],
+ 'test-cpu-profiler/JsNative1JsNative2JsSample': [SKIP],
# BUG(3525). Test crashes flakily.
'test-debug/RecursiveBreakpoints': [PASS, FLAKY],
'test-debug/RecursiveBreakpointsGlobal': [PASS, FLAKY],
+ # Fails sometimes.
+ 'test-debug/ProcessDebugMessagesThreaded': [PASS, FLAKY],
+
##############################################################################
# TurboFan compiler failures.
@@ -94,14 +103,15 @@
# BUG(3742).
'test-mark-compact/MarkCompactCollector': [PASS, ['arch==arm', NO_VARIANTS]],
- # TODO(jarin/mstarzinger): Investigate debugger issues with TurboFan.
- 'test-debug/DebugStepNatives': [PASS, NO_VARIANTS],
- 'test-debug/DebugStepFunctionApply': [PASS, NO_VARIANTS],
- 'test-debug/DebugStepFunctionCall': [PASS, NO_VARIANTS],
+ # TODO(jarin): Cannot lazy-deoptimize from conversions before comparisons.
+ 'test-js-typed-lowering/OrderCompareEffects': [SKIP],
# TODO(jochen): Reenable after we removed the CHECK() from the marking queue.
'test-mark-compact/MarkingDeque': [SKIP],
+ 'test-heap/TestInternalWeakLists': [PASS, ['arch==arm', NO_VARIANTS]],
+ 'test-heap/TestInternalWeakListsTraverseWithGC': [PASS, ['arch==arm', NO_VARIANTS]],
+
############################################################################
# Slow tests.
'test-api/Threading1': [PASS, ['mode == debug', SLOW]],
@@ -114,15 +124,14 @@
##############################################################################
['arch == arm64', {
+ 'test-cpu-profiler/CollectDeoptEvents': [PASS, FAIL],
+
'test-api/Bug618': [PASS],
# BUG(v8:3385).
'test-serialize/DeserializeFromSecondSerialization': [PASS, FAIL],
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [PASS, FAIL],
- # BUG(v8:2999).
- 'test-cpu-profiler/CollectCpuProfile': [PASS, FAIL],
-
# BUG(v8:3154).
'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
@@ -180,18 +189,12 @@
##############################################################################
['system == windows', {
- # BUG(2999).
- 'test-cpu-profiler/CollectCpuProfile': [PASS, FAIL],
-
# BUG(3005).
'test-alloc/CodeRange': [PASS, FAIL],
# BUG(3331). Fails on windows.
'test-heap/NoWeakHashTableLeakWithIncrementalMarking': [SKIP],
- # BUG(v8:3433). Crashes on windows.
- 'test-cpu-profiler/FunctionApplySample': [SKIP],
-
}], # 'system == windows'
##############################################################################
@@ -206,6 +209,8 @@
##############################################################################
['arch == arm', {
+ 'test-cpu-profiler/CollectDeoptEvents': [PASS, FAIL],
+
# BUG(355): Test crashes on ARM.
'test-log/ProfLazyMode': [SKIP],
@@ -229,20 +234,13 @@
##############################################################################
['arch == mipsel or arch == mips', {
-
- # BUG(2657): Test sometimes times out on MIPS simulator.
- 'test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate': [PASS, TIMEOUT],
+ 'test-cpu-profiler/CollectDeoptEvents': [PASS, FAIL],
# BUG(1075): Unresolved crashes on MIPS also.
'test-serialize/Deserialize': [SKIP],
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
-
- # Test requires turbofan:
- 'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
- 'codegen-tester/CompareWrapper': [SKIP],
- 'codegen-tester/ParametersEqual': [SKIP],
}], # 'arch == mipsel or arch == mips'
##############################################################################
@@ -260,9 +258,7 @@
##############################################################################
['arch == mips64el', {
-
- # BUG(2657): Test sometimes times out on MIPS simulator.
- 'test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate': [PASS, TIMEOUT],
+ 'test-cpu-profiler/CollectDeoptEvents': [PASS, FAIL],
# BUG(v8:3154).
'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
@@ -272,11 +268,6 @@
'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
'test-serialize/DeserializeAndRunScript2': [SKIP],
'test-serialize/DeserializeFromSecondSerialization': [SKIP],
-
- # Test requires turbofan:
- 'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
- 'codegen-tester/CompareWrapper': [SKIP],
- 'codegen-tester/ParametersEqual': [SKIP],
}], # 'arch == mips64el'
##############################################################################
@@ -286,6 +277,7 @@
'codegen-tester/CompareWrapper': [SKIP],
'codegen-tester/ParametersEqual': [SKIP],
'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
+ 'test-serialize/SerializeInternalReference': [FAIL],
}], # 'arch == x87'
##############################################################################
@@ -402,8 +394,13 @@
}], # 'arch == nacl_ia32 or arch == nacl_x64'
-['arch == ppc64', {
- #issue 2857
- 'test-log/EquivalenceOfLoggingAndTraversal' : [SKIP],
-}], # 'arch == ppc64'
+##############################################################################
+['arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True', {
+
+ # Pass but take too long with the simulator.
+ 'test-api/Threading1': [PASS, SLOW],
+ 'test-api/Threading2': [PASS, SLOW],
+ 'test-api/ExternalArrays': [PASS, SLOW],
+
}], # 'arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True'
]
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index ffafaf0803..30bbe1e8aa 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -128,6 +128,20 @@ struct ParameterTraits<T*> {
static uintptr_t Cast(void* r) { return reinterpret_cast<uintptr_t>(r); }
};
+// Additional template specializations required for mips64 to sign-extend
+// parameters as defined by the calling convention.
+template <>
+struct ParameterTraits<int32_t> {
+ static int64_t Cast(int32_t r) { return static_cast<int64_t>(r); }
+};
+
+template <>
+struct ParameterTraits<uint32_t> {
+ static int64_t Cast(uint32_t r) {
+ return static_cast<int64_t>(static_cast<int32_t>(r));
+ }
+};
+
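The effect of the two specializations, as a runnable sketch: on mips64 the calling convention keeps 32-bit integer arguments sign-extended in 64-bit registers, so a uint32_t must round-trip through int32_t before widening (CastWrong/CastRight are illustrative names):

#include <cstdint>
#include <cstdio>

int64_t CastWrong(uint32_t r) { return static_cast<int64_t>(r); }  // zero-extends
int64_t CastRight(uint32_t r) {
  return static_cast<int64_t>(static_cast<int32_t>(r));  // sign-extends
}

int main() {
  uint32_t v = 0xFFFFFFFFu;  // -1 as a 32-bit two's-complement value
  std::printf("%llx vs %llx\n",
              (unsigned long long)CastWrong(v),   // 00000000ffffffff
              (unsigned long long)CastRight(v));  // ffffffffffffffff
}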
class CallHelper {
public:
explicit CallHelper(Isolate* isolate, MachineSignature* machine_sig)
@@ -214,6 +228,7 @@ class CallHelper {
return static_cast<uintptr_t>(simulator->Call(f, 4, p1, p2, p3, p4));
}
+
template <typename R, typename F>
R DoCall(F* f) {
return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f)));
diff --git a/deps/v8/test/cctest/compiler/function-tester.h b/deps/v8/test/cctest/compiler/function-tester.h
index 440043cb94..20efd1e304 100644
--- a/deps/v8/test/cctest/compiler/function-tester.h
+++ b/deps/v8/test/cctest/compiler/function-tester.h
@@ -34,6 +34,7 @@ class FunctionTester : public InitializedHandleScope {
flags_(flags) {
Compile(function);
const uint32_t supported_flags = CompilationInfo::kContextSpecializing |
+ CompilationInfo::kBuiltinInliningEnabled |
CompilationInfo::kInliningEnabled |
CompilationInfo::kTypingEnabled;
CHECK_EQ(0u, flags_ & ~supported_flags);
@@ -60,7 +61,6 @@ class FunctionTester : public InitializedHandleScope {
CHECK(isolate->has_pending_exception());
CHECK(try_catch.HasCaught());
CHECK(no_result.is_null());
- // TODO(mstarzinger): Temporary workaround for issue chromium:362388.
isolate->OptionalRescheduleException(true);
}
@@ -71,10 +71,8 @@ class FunctionTester : public InitializedHandleScope {
CHECK(isolate->has_pending_exception());
CHECK(try_catch.HasCaught());
CHECK(no_result.is_null());
- // TODO(mstarzinger): Calling OptionalRescheduleException is a dirty hack,
- // it's the only way to make Message() not to assert because an external
- // exception has been caught by the try_catch.
isolate->OptionalRescheduleException(true);
+ CHECK(!try_catch.Message().IsEmpty());
return try_catch.Message();
}
@@ -152,9 +150,11 @@ class FunctionTester : public InitializedHandleScope {
Handle<JSFunction> Compile(Handle<JSFunction> function) {
// TODO(titzer): make this method private.
#if V8_TURBOFAN_TARGET
- CompilationInfoWithZone info(function);
+ Zone zone;
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info);
- CHECK(Parser::ParseStatic(&info));
+ CHECK(Parser::ParseStatic(info.parse_info()));
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
if (flags_ & CompilationInfo::kContextSpecializing) {
info.MarkAsContextSpecializing();
@@ -165,7 +165,7 @@ class FunctionTester : public InitializedHandleScope {
if (flags_ & CompilationInfo::kTypingEnabled) {
info.MarkAsTypingEnabled();
}
- CHECK(Compiler::Analyze(&info));
+ CHECK(Compiler::Analyze(info.parse_info()));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
Pipeline pipeline(&info);
@@ -208,12 +208,14 @@ class FunctionTester : public InitializedHandleScope {
// and replace the JSFunction's code with the result.
Handle<JSFunction> CompileGraph(Graph* graph) {
CHECK(Pipeline::SupportedTarget());
- CompilationInfoWithZone info(function);
+ Zone zone;
+ ParseInfo parse_info(&zone, function);
+ CompilationInfo info(&parse_info);
- CHECK(Parser::ParseStatic(&info));
+ CHECK(Parser::ParseStatic(info.parse_info()));
info.SetOptimizing(BailoutId::None(),
Handle<Code>(function->shared()->code()));
- CHECK(Compiler::Analyze(&info));
+ CHECK(Compiler::Analyze(info.parse_info()));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
diff --git a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc b/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
index e65ba2e0df..6afdc0a211 100644
--- a/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/simplified-graph-builder.cc
@@ -48,7 +48,7 @@ Node* SimplifiedGraphBuilder::MakeNode(const Operator* op,
DCHECK(op->ValueInputCount() == value_input_count);
DCHECK(!OperatorProperties::HasContextInput(op));
- DCHECK(!OperatorProperties::HasFrameStateInput(op));
+ DCHECK_EQ(0, OperatorProperties::GetFrameStateInputCount(op));
bool has_control = op->ControlInputCount() == 1;
bool has_effect = op->EffectInputCount() == 1;
diff --git a/deps/v8/test/cctest/compiler/test-codegen-deopt.cc b/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
index a90e4025dc..0b59308216 100644
--- a/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-codegen-deopt.cc
@@ -44,11 +44,14 @@ class DeoptCodegenTester {
explicit DeoptCodegenTester(HandleAndZoneScope* scope, const char* src)
: scope_(scope),
function(NewFunction(src)),
- info(function, scope->main_zone()),
- bailout_id(-1) {
- CHECK(Parser::ParseStatic(&info));
+ parse_info(scope->main_zone(), function),
+ info(&parse_info),
+ bailout_id(-1),
+ tagged_type(1, kMachAnyTagged, zone()),
+ empty_types(zone()) {
+ CHECK(Parser::ParseStatic(&parse_info));
info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
- CHECK(Compiler::Analyze(&info));
+ CHECK(Compiler::Analyze(&parse_info));
CHECK(Compiler::EnsureDeoptimizationSupport(&info));
DCHECK(info.shared_info()->has_deoptimization_support());
@@ -76,11 +79,14 @@ class DeoptCodegenTester {
HandleAndZoneScope* scope_;
Handle<JSFunction> function;
+ ParseInfo parse_info;
CompilationInfo info;
BailoutId bailout_id;
Handle<Code> result_code;
TestInstrSeq* code;
Graph* graph;
+ ZoneVector<MachineType> tagged_type;
+ ZoneVector<MachineType> empty_types;
};
@@ -118,9 +124,10 @@ class TrivialDeoptCodegenTester : public DeoptCodegenTester {
m.NewNode(common.HeapConstant(caller_context_constant));
bailout_id = GetCallBailoutId();
- Node* parameters = m.NewNode(common.StateValues(1), m.UndefinedConstant());
- Node* locals = m.NewNode(common.StateValues(0));
- Node* stack = m.NewNode(common.StateValues(0));
+ Node* parameters =
+ m.NewNode(common.TypedStateValues(&tagged_type), m.UndefinedConstant());
+ Node* locals = m.NewNode(common.TypedStateValues(&empty_types));
+ Node* stack = m.NewNode(common.TypedStateValues(&empty_types));
Node* state_node = m.NewNode(
common.FrameState(JS_FRAME, bailout_id,
@@ -233,9 +240,10 @@ class TrivialRuntimeDeoptCodegenTester : public DeoptCodegenTester {
Node* context_node = m.NewNode(common.HeapConstant(context_constant));
bailout_id = GetCallBailoutId();
- Node* parameters = m.NewNode(common.StateValues(1), m.UndefinedConstant());
- Node* locals = m.NewNode(common.StateValues(0));
- Node* stack = m.NewNode(common.StateValues(0));
+ Node* parameters =
+ m.NewNode(common.TypedStateValues(&tagged_type), m.UndefinedConstant());
+ Node* locals = m.NewNode(common.TypedStateValues(&empty_types));
+ Node* stack = m.NewNode(common.TypedStateValues(&empty_types));
Node* state_node = m.NewNode(
common.FrameState(JS_FRAME, bailout_id,
diff --git a/deps/v8/test/cctest/compiler/test-control-reducer.cc b/deps/v8/test/cctest/compiler/test-control-reducer.cc
index 827dcfdaa8..c2b225ab00 100644
--- a/deps/v8/test/cctest/compiler/test-control-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-control-reducer.cc
@@ -8,7 +8,7 @@
#include "src/base/bits.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-reducer.h"
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/node-properties.h"
@@ -694,9 +694,9 @@ TEST(CMergeReduce_none1) {
TEST(CMergeReduce_none2) {
ControlReducerTester R;
- Node* t = R.graph.NewNode(R.common.IfTrue(), R.start);
- Node* f = R.graph.NewNode(R.common.IfFalse(), R.start);
- Node* merge = R.graph.NewNode(R.common.Merge(2), t, f);
+ Node* t1 = R.graph.NewNode(R.common.IfTrue(), R.start);
+ Node* t2 = R.graph.NewNode(R.common.IfTrue(), R.start);
+ Node* merge = R.graph.NewNode(R.common.Merge(2), t1, t2);
R.ReduceMerge(merge, merge);
}
@@ -744,7 +744,7 @@ TEST(CMergeReduce_dead_rm1b) {
ControlReducerTester R;
Node* t = R.graph.NewNode(R.common.IfTrue(), R.start);
- Node* f = R.graph.NewNode(R.common.IfFalse(), R.start);
+ Node* f = R.graph.NewNode(R.common.IfTrue(), R.start);
for (int i = 0; i < 2; i++) {
Node* merge = R.graph.NewNode(R.common.Merge(3), R.dead, R.dead, R.dead);
for (int j = i + 1; j < 3; j++) {
@@ -1118,7 +1118,7 @@ TEST(CChainedDiamondsReduce_x_false) {
Diamond d2(R, R.zero);
d2.chain(d1);
- R.ReduceMergeIterative(d1.merge, d2.merge);
+ R.ReduceMergeIterative(R.start, d2.merge);
}
@@ -1128,8 +1128,7 @@ TEST(CChainedDiamondsReduce_false_x) {
Diamond d2(R, R.p0);
d2.chain(d1);
- R.ReduceMergeIterative(d2.merge, d2.merge);
- CheckInputs(d2.branch, R.p0, R.start);
+ R.ReduceMergeIterative(R.start, d2.merge);
}
@@ -1190,6 +1189,28 @@ TEST(CNestedDiamonds_xyz) {
}
+TEST(CUnusedDiamond1) {
+ ControlReducerTester R;
+ // if (p0) { } else { }
+ Node* branch = R.graph.NewNode(R.common.Branch(), R.p0, R.start);
+ Node* if_true = R.graph.NewNode(R.common.IfTrue(), branch);
+ Node* if_false = R.graph.NewNode(R.common.IfFalse(), branch);
+ Node* merge = R.graph.NewNode(R.common.Merge(2), if_true, if_false);
+ R.ReduceMergeIterative(R.start, merge);
+}
+
+
+TEST(CUnusedDiamond2) {
+ ControlReducerTester R;
+ // if (p0) { } else { }
+ Node* branch = R.graph.NewNode(R.common.Branch(), R.p0, R.start);
+ Node* if_true = R.graph.NewNode(R.common.IfTrue(), branch);
+ Node* if_false = R.graph.NewNode(R.common.IfFalse(), branch);
+ Node* merge = R.graph.NewNode(R.common.Merge(2), if_false, if_true);
+ R.ReduceMergeIterative(R.start, merge);
+}
+
+
TEST(CDeadLoop1) {
ControlReducerTester R;
@@ -1329,9 +1350,7 @@ TEST(Return_nested_diamonds1) {
CheckInputs(ret, d1.phi, R.start, d1.merge);
CheckInputs(d1.phi, R.one, R.zero, d1.merge);
- CheckInputs(d1.merge, d2.merge, d3.merge);
- CheckLiveDiamond(d2);
- CheckLiveDiamond(d3);
+ CheckInputs(d1.merge, d1.if_true, d1.if_false);
}
@@ -1348,11 +1367,7 @@ TEST(Return_nested_diamonds_true1) {
R.ReduceGraph(); // d1 gets folded true.
- CheckInputs(ret, R.one, R.start, d2.merge);
- CheckInputs(d2.branch, R.p0, R.start);
- CheckDeadDiamond(d1);
- CheckLiveDiamond(d2);
- CheckDeadDiamond(d3);
+ CheckInputs(ret, R.one, R.start, R.start);
}
@@ -1369,11 +1384,7 @@ TEST(Return_nested_diamonds_false1) {
R.ReduceGraph(); // d1 gets folded false.
- CheckInputs(ret, R.zero, R.start, d3.merge);
- CheckInputs(d3.branch, R.p0, R.start);
- CheckDeadDiamond(d1);
- CheckDeadDiamond(d2);
- CheckLiveDiamond(d3);
+ CheckInputs(ret, R.zero, R.start, R.start);
}
diff --git a/deps/v8/test/cctest/compiler/test-instruction.cc b/deps/v8/test/cctest/compiler/test-instruction.cc
index 85cc870e9d..22d46e7e39 100644
--- a/deps/v8/test/cctest/compiler/test-instruction.cc
+++ b/deps/v8/test/cctest/compiler/test-instruction.cc
@@ -27,20 +27,14 @@ typedef v8::internal::compiler::InstructionSequence TestInstrSeq;
class InstructionTester : public HandleAndZoneScope {
public: // We're all friends here.
InstructionTester()
- : isolate(main_isolate()),
- graph(zone()),
+ : graph(zone()),
schedule(zone()),
- fake_stub(main_isolate()),
- info(&fake_stub, main_isolate()),
common(zone()),
machine(zone()),
code(NULL) {}
- Isolate* isolate;
Graph graph;
Schedule schedule;
- FakeStubForTesting fake_stub;
- CompilationInfoWithZone info;
CommonOperatorBuilder common;
MachineOperatorBuilder machine;
TestInstrSeq* code;
@@ -93,8 +87,12 @@ class InstructionTester : public HandleAndZoneScope {
return UnallocatedOperand(UnallocatedOperand::ANY, vreg).Copy(zone());
}
+ RpoNumber RpoFor(BasicBlock* block) {
+ return RpoNumber::FromInt(block->rpo_number());
+ }
+
InstructionBlock* BlockAt(BasicBlock* block) {
- return code->InstructionBlockAt(block->GetRpoNumber());
+ return code->InstructionBlockAt(RpoFor(block));
}
BasicBlock* GetBasicBlock(int instruction_index) {
const InstructionBlock* block =
@@ -131,7 +129,6 @@ TEST(InstructionBasic) {
for (auto block : *blocks) {
CHECK_EQ(block->rpo_number(), R.BlockAt(block)->rpo_number().ToInt());
- CHECK_EQ(block->id().ToInt(), R.BlockAt(block)->id().ToInt());
CHECK(!block->loop_end());
}
}
@@ -151,23 +148,23 @@ TEST(InstructionGetBasicBlock) {
R.allocCode();
- R.code->StartBlock(b0->GetRpoNumber());
+ R.code->StartBlock(R.RpoFor(b0));
int i0 = R.NewInstr();
int i1 = R.NewInstr();
- R.code->EndBlock(b0->GetRpoNumber());
- R.code->StartBlock(b1->GetRpoNumber());
+ R.code->EndBlock(R.RpoFor(b0));
+ R.code->StartBlock(R.RpoFor(b1));
int i2 = R.NewInstr();
int i3 = R.NewInstr();
int i4 = R.NewInstr();
int i5 = R.NewInstr();
- R.code->EndBlock(b1->GetRpoNumber());
- R.code->StartBlock(b2->GetRpoNumber());
+ R.code->EndBlock(R.RpoFor(b1));
+ R.code->StartBlock(R.RpoFor(b2));
int i6 = R.NewInstr();
int i7 = R.NewInstr();
int i8 = R.NewInstr();
- R.code->EndBlock(b2->GetRpoNumber());
- R.code->StartBlock(b3->GetRpoNumber());
- R.code->EndBlock(b3->GetRpoNumber());
+ R.code->EndBlock(R.RpoFor(b2));
+ R.code->StartBlock(R.RpoFor(b3));
+ R.code->EndBlock(R.RpoFor(b3));
CHECK_EQ(b0, R.GetBasicBlock(i0));
CHECK_EQ(b0, R.GetBasicBlock(i1));
@@ -203,11 +200,11 @@ TEST(InstructionIsGapAt) {
R.allocCode();
TestInstr* i0 = TestInstr::New(R.zone(), 100);
- TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
- R.code->StartBlock(b0->GetRpoNumber());
+ TestInstr* g = TestInstr::New(R.zone(), 103);
+ R.code->StartBlock(R.RpoFor(b0));
R.code->AddInstruction(i0);
R.code->AddInstruction(g);
- R.code->EndBlock(b0->GetRpoNumber());
+ R.code->EndBlock(R.RpoFor(b0));
CHECK(R.code->instructions().size() == 4);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
@@ -226,18 +223,18 @@ TEST(InstructionIsGapAt2) {
R.allocCode();
TestInstr* i0 = TestInstr::New(R.zone(), 100);
- TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
- R.code->StartBlock(b0->GetRpoNumber());
+ TestInstr* g = TestInstr::New(R.zone(), 103);
+ R.code->StartBlock(R.RpoFor(b0));
R.code->AddInstruction(i0);
R.code->AddInstruction(g);
- R.code->EndBlock(b0->GetRpoNumber());
+ R.code->EndBlock(R.RpoFor(b0));
TestInstr* i1 = TestInstr::New(R.zone(), 102);
- TestInstr* g1 = TestInstr::New(R.zone(), 104)->MarkAsControl();
- R.code->StartBlock(b1->GetRpoNumber());
+ TestInstr* g1 = TestInstr::New(R.zone(), 104);
+ R.code->StartBlock(R.RpoFor(b1));
R.code->AddInstruction(i1);
R.code->AddInstruction(g1);
- R.code->EndBlock(b1->GetRpoNumber());
+ R.code->EndBlock(R.RpoFor(b1));
CHECK(R.code->instructions().size() == 8);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
@@ -254,11 +251,11 @@ TEST(InstructionAddGapMove) {
R.allocCode();
TestInstr* i0 = TestInstr::New(R.zone(), 100);
- TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
- R.code->StartBlock(b0->GetRpoNumber());
+ TestInstr* g = TestInstr::New(R.zone(), 103);
+ R.code->StartBlock(R.RpoFor(b0));
R.code->AddInstruction(i0);
R.code->AddInstruction(g);
- R.code->EndBlock(b0->GetRpoNumber());
+ R.code->EndBlock(R.RpoFor(b0));
CHECK(R.code->instructions().size() == 4);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
diff --git a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
index f4531d3e83..cc4b2bdef2 100644
--- a/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
@@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
@@ -68,7 +67,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* stack = graph.NewNode(common.StateValues(0));
Node* state_node =
- graph.NewNode(common.FrameState(JS_FRAME, BailoutId(0),
+ graph.NewNode(common.FrameState(JS_FRAME, BailoutId::None(),
OutputFrameStateCombine::Ignore()),
parameters, locals, stack, context, UndefinedConstant());
@@ -114,9 +113,13 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* Binop(const Operator* op, Node* left, Node* right) {
// JS binops also require context, effect, and control
- if (OperatorProperties::HasFrameStateInput(op)) {
+ if (OperatorProperties::GetFrameStateInputCount(op) == 1) {
return graph.NewNode(op, left, right, context(),
EmptyFrameState(context()), start(), control());
+ } else if (OperatorProperties::GetFrameStateInputCount(op) == 2) {
+ return graph.NewNode(op, left, right, context(),
+ EmptyFrameState(context()),
+ EmptyFrameState(context()), start(), control());
} else {
return graph.NewNode(op, left, right, context(), start(), control());
}
@@ -124,7 +127,8 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* Unop(const Operator* op, Node* input) {
// JS unops also require context, effect, and control
- if (OperatorProperties::HasFrameStateInput(op)) {
+ if (OperatorProperties::GetFrameStateInputCount(op) > 0) {
+ DCHECK(OperatorProperties::GetFrameStateInputCount(op) == 1);
return graph.NewNode(op, input, context(), EmptyFrameState(context()),
start(), control());
} else {
@@ -134,7 +138,10 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
Node* UseForEffect(Node* node) {
// TODO(titzer): use EffectPhi after fixing EffectCount
- if (OperatorProperties::HasFrameStateInput(javascript.ToNumber())) {
+ if (OperatorProperties::GetFrameStateInputCount(javascript.ToNumber()) >
+ 0) {
+ DCHECK(OperatorProperties::GetFrameStateInputCount(
+ javascript.ToNumber()) == 1);
return graph.NewNode(javascript.ToNumber(), node, context(),
EmptyFrameState(context()), node, control());
} else {
@@ -500,42 +507,6 @@ TEST(JSToNumberOfNumberOrOtherPrimitive) {
}
-TEST(JSToBoolean) {
- JSTypedLoweringTester R;
- const Operator* op = R.javascript.ToBoolean();
-
- { // ToBoolean(undefined)
- Node* r = R.ReduceUnop(op, Type::Undefined());
- R.CheckFalse(r);
- }
-
- { // ToBoolean(null)
- Node* r = R.ReduceUnop(op, Type::Null());
- R.CheckFalse(r);
- }
-
- { // ToBoolean(boolean)
- Node* r = R.ReduceUnop(op, Type::Boolean());
- CHECK_EQ(IrOpcode::kParameter, r->opcode());
- }
-
- { // ToBoolean(object)
- Node* r = R.ReduceUnop(op, Type::DetectableObject());
- R.CheckTrue(r);
- }
-
- { // ToBoolean(undetectable)
- Node* r = R.ReduceUnop(op, Type::Undetectable());
- R.CheckFalse(r);
- }
-
- { // ToBoolean(object)
- Node* r = R.ReduceUnop(op, Type::Object());
- CHECK_EQ(IrOpcode::kAnyToBoolean, r->opcode());
- }
-}
-
-
TEST(JSToString1) {
JSTypedLoweringTester R;
@@ -717,24 +688,6 @@ TEST(MixedComparison1) {
}
-TEST(UnaryNot) {
- JSTypedLoweringTester R;
- const Operator* opnot = R.javascript.UnaryNot();
-
- for (size_t i = 0; i < arraysize(kJSTypes); i++) {
- Node* orig = R.Unop(opnot, R.Parameter(kJSTypes[i]));
- Node* r = R.reduce(orig);
-
- if (r == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
- // The original node was turned into a ToBoolean.
- CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
- } else if (r->opcode() != IrOpcode::kHeapConstant) {
- CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
- }
- }
-}
-
-
TEST(RemoveToNumberEffects) {
FLAG_turbo_deoptimization = true;
@@ -749,8 +702,9 @@ TEST(RemoveToNumberEffects) {
switch (i) {
case 0:
- // TODO(jarin) Replace with a query of FLAG_turbo_deoptimization.
- if (OperatorProperties::HasFrameStateInput(R.javascript.ToNumber())) {
+ if (FLAG_turbo_deoptimization) {
+ DCHECK(OperatorProperties::GetFrameStateInputCount(
+ R.javascript.ToNumber()) == 1);
effect_use = R.graph.NewNode(R.javascript.ToNumber(), p0, R.context(),
frame_state, ton, R.start());
} else {
@@ -759,8 +713,9 @@ TEST(RemoveToNumberEffects) {
}
break;
case 1:
- // TODO(jarin) Replace with a query of FLAG_turbo_deoptimization.
- if (OperatorProperties::HasFrameStateInput(R.javascript.ToNumber())) {
+ if (FLAG_turbo_deoptimization) {
+ DCHECK(OperatorProperties::GetFrameStateInputCount(
+ R.javascript.ToNumber()) == 1);
effect_use =
R.graph.NewNode(R.javascript.ToNumber(), ton, R.context(),
frame_state, ton, R.start());
@@ -773,11 +728,11 @@ TEST(RemoveToNumberEffects) {
effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
case 3:
effect_use = R.graph.NewNode(R.javascript.Add(), ton, ton, R.context(),
- frame_state, ton, R.start());
+ frame_state, frame_state, ton, R.start());
break;
case 4:
effect_use = R.graph.NewNode(R.javascript.Add(), p0, p0, R.context(),
- frame_state, ton, R.start());
+ frame_state, frame_state, ton, R.start());
break;
case 5:
effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
index d9de18efad..b2d3008aa9 100644
--- a/deps/v8/test/cctest/compiler/test-jump-threading.cc
+++ b/deps/v8/test/cctest/compiler/test-jump-threading.cc
@@ -13,8 +13,6 @@ namespace v8 {
namespace internal {
namespace compiler {
-typedef BasicBlock::RpoNumber RpoNumber;
-
class TestCode : public HandleAndZoneScope {
public:
TestCode()
@@ -32,8 +30,8 @@ class TestCode : public HandleAndZoneScope {
int Jump(int target) {
Start();
InstructionOperand ops[] = {UseRpo(target)};
- sequence_.AddInstruction(Instruction::New(main_zone(), kArchJmp, 0, NULL, 1,
- ops, 0, NULL)->MarkAsControl());
+ sequence_.AddInstruction(
+ Instruction::New(main_zone(), kArchJmp, 0, NULL, 1, ops, 0, NULL));
int pos = static_cast<int>(sequence_.instructions().size() - 1);
End();
return pos;
@@ -47,8 +45,8 @@ class TestCode : public HandleAndZoneScope {
InstructionOperand ops[] = {UseRpo(ttarget), UseRpo(ftarget)};
InstructionCode code = 119 | FlagsModeField::encode(kFlags_branch) |
FlagsConditionField::encode(kEqual);
- sequence_.AddInstruction(Instruction::New(main_zone(), code, 0, NULL, 2,
- ops, 0, NULL)->MarkAsControl());
+ sequence_.AddInstruction(
+ Instruction::New(main_zone(), code, 0, NULL, 2, ops, 0, NULL));
int pos = static_cast<int>(sequence_.instructions().size() - 1);
End();
return pos;
@@ -87,9 +85,9 @@ class TestCode : public HandleAndZoneScope {
}
void Start(bool deferred = false) {
if (current_ == NULL) {
- current_ = new (main_zone()) InstructionBlock(
- main_zone(), BasicBlock::Id::FromInt(rpo_number_.ToInt()),
- rpo_number_, RpoNumber::Invalid(), RpoNumber::Invalid(), deferred);
+ current_ = new (main_zone())
+ InstructionBlock(main_zone(), rpo_number_, RpoNumber::Invalid(),
+ RpoNumber::Invalid(), deferred);
blocks_.push_back(current_);
sequence_.StartBlock(rpo_number_);
}
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index 13695b2e0b..212ff3a8f2 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -6,6 +6,7 @@
#include "src/code-stubs.h"
#include "src/compiler.h"
+#include "src/parser.h"
#include "src/zone.h"
#include "src/compiler/common-operator.h"
@@ -33,7 +34,7 @@ static Handle<JSFunction> Compile(const char* source) {
->NewStringFromUtf8(CStrVector(source))
.ToHandleChecked();
Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
- source_code, Handle<String>(), 0, 0, false, false,
+ source_code, Handle<String>(), 0, 0, false, false, Handle<Object>(),
Handle<Context>(isolate->native_context()), NULL, NULL,
v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE, false);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -42,25 +43,25 @@ static Handle<JSFunction> Compile(const char* source) {
TEST(TestLinkageCreate) {
- InitializedHandleScope handles;
+ HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + b");
- CompilationInfoWithZone info(function);
+ ParseInfo parse_info(handles.main_zone(), function);
+ CompilationInfo info(&parse_info);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
}
TEST(TestLinkageJSFunctionIncoming) {
- InitializedHandleScope handles;
-
const char* sources[] = {"(function() { })", "(function(a) { })",
"(function(a,b) { })", "(function(a,b,c) { })"};
for (int i = 0; i < 3; i++) {
- i::HandleScope handles(CcTest::i_isolate());
+ HandleAndZoneScope handles;
Handle<JSFunction> function = v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(CompileRun(sources[i])));
- CompilationInfoWithZone info(function);
+ ParseInfo parse_info(handles.main_zone(), function);
+ CompilationInfo info(&parse_info);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
@@ -74,9 +75,10 @@ TEST(TestLinkageJSFunctionIncoming) {
TEST(TestLinkageCodeStubIncoming) {
Isolate* isolate = CcTest::InitIsolateOnce();
+ Zone zone;
ToNumberStub stub(isolate);
- CompilationInfoWithZone info(&stub, isolate);
- CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
+ CompilationInfo info(&stub, isolate, &zone);
+ CallDescriptor* descriptor = Linkage::ComputeIncoming(&zone, &info);
CHECK(descriptor);
CHECK_EQ(1, static_cast<int>(descriptor->JSParameterCount()));
CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
@@ -88,7 +90,8 @@ TEST(TestLinkageCodeStubIncoming) {
TEST(TestLinkageJSCall) {
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + c");
- CompilationInfoWithZone info(function);
+ ParseInfo parse_info(handles.main_zone(), function);
+ CompilationInfo info(&parse_info);
for (int i = 0; i < 32; i++) {
CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
diff --git a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
index 7513307bab..5f9820c72a 100644
--- a/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
+++ b/deps/v8/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -29,14 +29,15 @@ struct TestHelper : public HandleAndZoneScope {
void CheckLoopAssignedCount(int expected, const char* var_name) {
// TODO(titzer): don't scope analyze every single time.
- CompilationInfo info(function, main_zone());
+ ParseInfo parse_info(main_zone(), function);
+ CompilationInfo info(&parse_info);
- CHECK(Parser::ParseStatic(&info));
- CHECK(Rewriter::Rewrite(&info));
- CHECK(Scope::Analyze(&info));
+ CHECK(Parser::ParseStatic(&parse_info));
+ CHECK(Rewriter::Rewrite(&parse_info));
+ CHECK(Scope::Analyze(&parse_info));
Scope* scope = info.function()->scope();
- AstValueFactory* factory = info.ast_value_factory();
+ AstValueFactory* factory = parse_info.ast_value_factory();
CHECK(scope);
if (result == NULL) {
diff --git a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
index 7ee5751875..beedc459e7 100644
--- a/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -6,7 +6,6 @@
#include "src/base/utils/random-number-generator.h"
#include "src/codegen.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/operator-properties.h"
diff --git a/deps/v8/test/cctest/compiler/test-node-algorithm.cc b/deps/v8/test/cctest/compiler/test-node-algorithm.cc
index 842d18272e..0cb77011aa 100644
--- a/deps/v8/test/cctest/compiler/test-node-algorithm.cc
+++ b/deps/v8/test/cctest/compiler/test-node-algorithm.cc
@@ -9,7 +9,6 @@
#include "graph-tester.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/graph-inl.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/node.h"
#include "src/compiler/operator.h"
@@ -20,45 +19,6 @@ using namespace v8::internal::compiler;
static Operator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
"dummy", 0, 0, 0, 1, 0, 0);
-class PreNodeVisitor : public NullNodeVisitor {
- public:
- void Pre(Node* node) {
- printf("NODE ID: %d\n", node->id());
- nodes_.push_back(node);
- }
- std::vector<Node*> nodes_;
-};
-
-
-class PostNodeVisitor : public NullNodeVisitor {
- public:
- void Post(Node* node) {
- printf("NODE ID: %d\n", node->id());
- nodes_.push_back(node);
- }
- std::vector<Node*> nodes_;
-};
-
-
-TEST(TestInputNodePreOrderVisitSimple) {
- GraphWithStartNodeTester graph;
- Node* n2 = graph.NewNode(&dummy_operator, graph.start());
- Node* n3 = graph.NewNode(&dummy_operator, n2);
- Node* n4 = graph.NewNode(&dummy_operator, n2, n3);
- Node* n5 = graph.NewNode(&dummy_operator, n4, n2);
- graph.SetEnd(n5);
-
- PreNodeVisitor node_visitor;
- graph.VisitNodeInputsFromEnd(&node_visitor);
- CHECK_EQ(5, static_cast<int>(node_visitor.nodes_.size()));
- CHECK(n5->id() == node_visitor.nodes_[0]->id());
- CHECK(n4->id() == node_visitor.nodes_[1]->id());
- CHECK(n2->id() == node_visitor.nodes_[2]->id());
- CHECK(graph.start()->id() == node_visitor.nodes_[3]->id());
- CHECK(n3->id() == node_visitor.nodes_[4]->id());
-}
-
-
TEST(TestPrintNodeGraphToNodeGraphviz) {
GraphWithStartNodeTester graph;
Node* n2 = graph.NewNode(&dummy_operator, graph.start());
diff --git a/deps/v8/test/cctest/compiler/test-node.cc b/deps/v8/test/cctest/compiler/test-node.cc
index 2c51e26f86..5bb21f585d 100644
--- a/deps/v8/test/cctest/compiler/test-node.cc
+++ b/deps/v8/test/cctest/compiler/test-node.cc
@@ -13,32 +13,143 @@
using namespace v8::internal;
using namespace v8::internal::compiler;
+#define NONE reinterpret_cast<Node*>(1)
+
static Operator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
"dummy", 0, 0, 0, 1, 0, 0);
+#define CHECK_USES(node, ...) \
+ do { \
+ Node* __array[] = {__VA_ARGS__}; \
+ int __size = \
+ __array[0] != NONE ? static_cast<int>(arraysize(__array)) : 0; \
+ CheckUseChain(node, __array, __size); \
+ } while (false)
+
+
+typedef std::multiset<Node*, std::less<Node*>> NodeMSet;
+
+static void CheckUseChain(Node* node, Node** uses, int use_count) {
+ // Check ownership.
+ if (use_count == 1) CHECK(node->OwnedBy(uses[0]));
+ if (use_count > 1) {
+ for (int i = 0; i < use_count; i++) {
+ CHECK(!node->OwnedBy(uses[i]));
+ }
+ }
+
+ // Check the self-reported use count.
+ CHECK_EQ(use_count, node->UseCount());
+
+ // Build the expectation set.
+ NodeMSet expect_set;
+ for (int i = 0; i < use_count; i++) {
+ expect_set.insert(uses[i]);
+ }
+
+ {
+ // Check that iterating over the uses gives the right counts.
+ NodeMSet use_set;
+ for (auto use : node->uses()) {
+ use_set.insert(use);
+ }
+ CHECK(expect_set == use_set);
+ }
+
+ {
+ // Check that iterating over the use edges gives the right counts,
+ // input indices, from(), and to() pointers.
+ NodeMSet use_set;
+ for (auto edge : node->use_edges()) {
+ CHECK_EQ(node, edge.to());
+ CHECK_EQ(node, edge.from()->InputAt(edge.index()));
+ use_set.insert(edge.from());
+ }
+ CHECK(expect_set == use_set);
+ }
+
+ {
+ // Check the use nodes actually have the node as inputs.
+ for (Node* use : node->uses()) {
+ size_t count = 0;
+ for (Node* input : use->inputs()) {
+ if (input == node) count++;
+ }
+ CHECK_EQ(count, expect_set.count(use));
+ }
+ }
+}
+
+
+#define CHECK_INPUTS(node, ...) \
+ do { \
+ Node* __array[] = {__VA_ARGS__}; \
+ int __size = \
+ __array[0] != NONE ? static_cast<int>(arraysize(__array)) : 0; \
+ CheckInputs(node, __array, __size); \
+ } while (false)
+
+
+static void CheckInputs(Node* node, Node** inputs, int input_count) {
+ CHECK_EQ(input_count, node->InputCount());
+ // Check InputAt().
+ for (int i = 0; i < static_cast<int>(input_count); i++) {
+ CHECK_EQ(inputs[i], node->InputAt(i));
+ }
+
+ // Check input iterator.
+ int index = 0;
+ for (Node* input : node->inputs()) {
+ CHECK_EQ(inputs[index], input);
+ index++;
+ }
+
+ // Check use lists of inputs.
+ for (int i = 0; i < static_cast<int>(input_count); i++) {
+ Node* input = inputs[i];
+ if (!input) continue; // skip null inputs
+ bool found = false;
+ // Check regular use list.
+ for (Node* use : input->uses()) {
+ if (use == node) {
+ found = true;
+ break;
+ }
+ }
+ CHECK(found);
+ int count = 0;
+ // Check use edge list.
+ for (auto edge : input->use_edges()) {
+ if (edge.from() == node && edge.to() == input && edge.index() == i) {
+ count++;
+ }
+ }
+ CHECK_EQ(1, count);
+ }
+}
+
+
TEST(NodeUseIteratorReplaceUses) {
GraphTester graph;
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator, n0);
Node* n2 = graph.NewNode(&dummy_operator, n0);
Node* n3 = graph.NewNode(&dummy_operator);
- auto i1(n0->uses().begin());
- CHECK_EQ(n1, *i1);
- ++i1;
- CHECK_EQ(n2, *i1);
+
+ CHECK_USES(n0, n1, n2);
+
+ CHECK_INPUTS(n1, n0);
+ CHECK_INPUTS(n2, n0);
+
n0->ReplaceUses(n3);
- auto i2(n3->uses().begin());
- CHECK_EQ(n1, *i2);
- ++i2;
- CHECK_EQ(n2, *i2);
- auto i3(n1->inputs().begin());
- CHECK_EQ(n3, *i3);
- ++i3;
- CHECK(n1->inputs().end() == i3);
- auto i4(n2->inputs().begin());
- CHECK_EQ(n3, *i4);
- ++i4;
- CHECK(n2->inputs().end() == i4);
+
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, NONE);
+ CHECK_USES(n2, NONE);
+ CHECK_USES(n3, n1, n2);
+
+ CHECK_INPUTS(n1, n3);
+ CHECK_INPUTS(n2, n3);
}
@@ -46,21 +157,22 @@ TEST(NodeUseIteratorReplaceUsesSelf) {
GraphTester graph;
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator, n0);
- Node* n3 = graph.NewNode(&dummy_operator);
+
+ CHECK_USES(n0, n1);
+ CHECK_USES(n1, NONE);
n1->ReplaceInput(0, n1); // Create self-reference.
- auto i1(n1->uses().begin());
- CHECK_EQ(n1, *i1);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, n1);
- n1->ReplaceUses(n3);
+ Node* n2 = graph.NewNode(&dummy_operator);
- CHECK(n1->uses().begin() == n1->uses().end());
+ n1->ReplaceUses(n2);
- auto i2(n3->uses().begin());
- CHECK_EQ(n1, *i2);
- ++i2;
- CHECK(n1->uses().end() == i2);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, NONE);
+ CHECK_USES(n2, n1);
}
@@ -70,48 +182,22 @@ TEST(ReplaceInput) {
Node* n1 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator);
Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
- auto i1(n3->inputs().begin());
- CHECK(n0 == *i1);
- CHECK_EQ(n0, n3->InputAt(0));
- ++i1;
- CHECK_EQ(n1, *i1);
- CHECK_EQ(n1, n3->InputAt(1));
- ++i1;
- CHECK_EQ(n2, *i1);
- CHECK_EQ(n2, n3->InputAt(2));
- ++i1;
- CHECK(i1 == n3->inputs().end());
-
- auto i2(n1->uses().begin());
- CHECK_EQ(n3, *i2);
- ++i2;
- CHECK(i2 == n1->uses().end());
-
Node* n4 = graph.NewNode(&dummy_operator);
- auto i3(n4->uses().begin());
- CHECK(i3 == n4->uses().end());
+
+ CHECK_USES(n0, n3);
+ CHECK_USES(n1, n3);
+ CHECK_USES(n2, n3);
+ CHECK_USES(n3, NONE);
+ CHECK_USES(n4, NONE);
+
+ CHECK_INPUTS(n3, n0, n1, n2);
n3->ReplaceInput(1, n4);
- auto i4(n1->uses().begin());
- CHECK(i4 == n1->uses().end());
-
- auto i5(n4->uses().begin());
- CHECK_EQ(n3, *i5);
- ++i5;
- CHECK(i5 == n4->uses().end());
-
- auto i6(n3->inputs().begin());
- CHECK(n0 == *i6);
- CHECK_EQ(n0, n3->InputAt(0));
- ++i6;
- CHECK_EQ(n4, *i6);
- CHECK_EQ(n4, n3->InputAt(1));
- ++i6;
- CHECK_EQ(n2, *i6);
- CHECK_EQ(n2, n3->InputAt(2));
- ++i6;
- CHECK(i6 == n3->inputs().end());
+ CHECK_USES(n1, NONE);
+ CHECK_USES(n4, n3);
+
+ CHECK_INPUTS(n3, n0, n4, n2);
}
@@ -169,16 +255,19 @@ TEST(Uses) {
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator, n0);
- CHECK_EQ(1, n0->UseCount());
- printf("A: %d vs %d\n", n0->UseAt(0)->id(), n1->id());
- CHECK(n0->UseAt(0) == n1);
+
+ CHECK_USES(n0, n1);
+ CHECK_USES(n1, NONE);
+
Node* n2 = graph.NewNode(&dummy_operator, n0);
- CHECK_EQ(2, n0->UseCount());
- printf("B: %d vs %d\n", n0->UseAt(1)->id(), n2->id());
- CHECK(n0->UseAt(1) == n2);
+
+ CHECK_USES(n0, n1, n2);
+ CHECK_USES(n2, NONE);
+
Node* n3 = graph.NewNode(&dummy_operator, n0);
- CHECK_EQ(3, n0->UseCount());
- CHECK(n0->UseAt(2) == n3);
+
+ CHECK_USES(n0, n1, n2, n3);
+ CHECK_USES(n3, NONE);
}
@@ -189,39 +278,23 @@ TEST(Inputs) {
Node* n1 = graph.NewNode(&dummy_operator, n0);
Node* n2 = graph.NewNode(&dummy_operator, n0);
Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
- CHECK_EQ(3, n3->InputCount());
- CHECK(n3->InputAt(0) == n0);
- CHECK(n3->InputAt(1) == n1);
- CHECK(n3->InputAt(2) == n2);
+
+ CHECK_INPUTS(n3, n0, n1, n2);
+
Node* n4 = graph.NewNode(&dummy_operator, n0, n1, n2);
n3->AppendInput(graph.zone(), n4);
- CHECK_EQ(4, n3->InputCount());
- CHECK(n3->InputAt(0) == n0);
- CHECK(n3->InputAt(1) == n1);
- CHECK(n3->InputAt(2) == n2);
- CHECK(n3->InputAt(3) == n4);
- Node* n5 = graph.NewNode(&dummy_operator, n4);
+
+ CHECK_INPUTS(n3, n0, n1, n2, n4);
+ CHECK_USES(n4, n3);
+
n3->AppendInput(graph.zone(), n4);
- CHECK_EQ(5, n3->InputCount());
- CHECK(n3->InputAt(0) == n0);
- CHECK(n3->InputAt(1) == n1);
- CHECK(n3->InputAt(2) == n2);
- CHECK(n3->InputAt(3) == n4);
- CHECK(n3->InputAt(4) == n4);
-
- // Make sure uses have been hooked op correctly.
- Node::Uses uses(n4->uses());
- auto current = uses.begin();
- CHECK(current != uses.end());
- CHECK(*current == n3);
- ++current;
- CHECK(current != uses.end());
- CHECK(*current == n5);
- ++current;
- CHECK(current != uses.end());
- CHECK(*current == n3);
- ++current;
- CHECK(current == uses.end());
+
+ CHECK_INPUTS(n3, n0, n1, n2, n4, n4);
+ CHECK_USES(n4, n3, n3);
+
+ Node* n5 = graph.NewNode(&dummy_operator, n4);
+
+ CHECK_USES(n4, n3, n3, n5);
}
@@ -232,17 +305,25 @@ TEST(RemoveInput) {
Node* n1 = graph.NewNode(&dummy_operator, n0);
Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+ CHECK_INPUTS(n0, NONE);
+ CHECK_INPUTS(n1, n0);
+ CHECK_INPUTS(n2, n0, n1);
+ CHECK_USES(n0, n1, n2);
+
n1->RemoveInput(0);
- CHECK_EQ(0, n1->InputCount());
- CHECK_EQ(1, n0->UseCount());
+ CHECK_INPUTS(n1, NONE);
+ CHECK_USES(n0, n2);
n2->RemoveInput(0);
- CHECK_EQ(1, n2->InputCount());
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(1, n1->UseCount());
+ CHECK_INPUTS(n2, n1);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, n2);
n2->RemoveInput(0);
- CHECK_EQ(0, n2->InputCount());
+ CHECK_INPUTS(n2, NONE);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, NONE);
+ CHECK_USES(n2, NONE);
}
@@ -253,33 +334,17 @@ TEST(AppendInputsAndIterator) {
Node* n1 = graph.NewNode(&dummy_operator, n0);
Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
- Node::InputEdges inputs(n2->input_edges());
- Node::InputEdges::iterator current = inputs.begin();
- CHECK(current != inputs.end());
- CHECK((*current).to() == n0);
- ++current;
- CHECK(current != inputs.end());
- CHECK((*current).to() == n1);
- ++current;
- CHECK(current == inputs.end());
+ CHECK_INPUTS(n0, NONE);
+ CHECK_INPUTS(n1, n0);
+ CHECK_INPUTS(n2, n0, n1);
+ CHECK_USES(n0, n1, n2);
Node* n3 = graph.NewNode(&dummy_operator);
+
n2->AppendInput(graph.zone(), n3);
- inputs = n2->input_edges();
- current = inputs.begin();
- CHECK(current != inputs.end());
- CHECK((*current).to() == n0);
- CHECK_EQ(0, (*current).index());
- ++current;
- CHECK(current != inputs.end());
- CHECK((*current).to() == n1);
- CHECK_EQ(1, (*current).index());
- ++current;
- CHECK(current != inputs.end());
- CHECK((*current).to() == n3);
- CHECK_EQ(2, (*current).index());
- ++current;
- CHECK(current == inputs.end());
+
+ CHECK_INPUTS(n2, n0, n1, n3);
+ CHECK_USES(n3, n2);
}
@@ -289,15 +354,23 @@ TEST(NullInputsSimple) {
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator, n0);
Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
- CHECK_EQ(2, n2->InputCount());
-
- CHECK(n0 == n2->InputAt(0));
- CHECK(n1 == n2->InputAt(1));
- CHECK_EQ(2, n0->UseCount());
- n2->ReplaceInput(0, NULL);
- CHECK(NULL == n2->InputAt(0));
- CHECK(n1 == n2->InputAt(1));
- CHECK_EQ(1, n0->UseCount());
+
+ CHECK_INPUTS(n0, NONE);
+ CHECK_INPUTS(n1, n0);
+ CHECK_INPUTS(n2, n0, n1);
+ CHECK_USES(n0, n1, n2);
+
+ n2->ReplaceInput(0, nullptr);
+
+ CHECK_INPUTS(n2, NULL, n1);
+
+ CHECK_USES(n0, n1);
+
+ n2->ReplaceInput(1, nullptr);
+
+ CHECK_INPUTS(n2, NULL, NULL);
+
+ CHECK_USES(n1, NONE);
}
@@ -310,17 +383,16 @@ TEST(NullInputsAppended) {
Node* n3 = graph.NewNode(&dummy_operator, n0);
n3->AppendInput(graph.zone(), n1);
n3->AppendInput(graph.zone(), n2);
- CHECK_EQ(3, n3->InputCount());
- CHECK(n0 == n3->InputAt(0));
- CHECK(n1 == n3->InputAt(1));
- CHECK(n2 == n3->InputAt(2));
- CHECK_EQ(1, n1->UseCount());
+ CHECK_INPUTS(n3, n0, n1, n2);
+ CHECK_USES(n0, n1, n2, n3);
+ CHECK_USES(n1, n3);
+ CHECK_USES(n2, n3);
+
n3->ReplaceInput(1, NULL);
- CHECK(n0 == n3->InputAt(0));
- CHECK(NULL == n3->InputAt(1));
- CHECK(n2 == n3->InputAt(2));
- CHECK_EQ(0, n1->UseCount());
+ CHECK_USES(n1, NONE);
+
+ CHECK_INPUTS(n3, n0, NULL, n2);
}
@@ -331,26 +403,23 @@ TEST(ReplaceUsesFromAppendedInputs) {
Node* n1 = graph.NewNode(&dummy_operator, n0);
Node* n2 = graph.NewNode(&dummy_operator, n0);
Node* n3 = graph.NewNode(&dummy_operator);
+
+ CHECK_INPUTS(n2, n0);
+
n2->AppendInput(graph.zone(), n1);
+ CHECK_INPUTS(n2, n0, n1);
+ CHECK_USES(n1, n2);
+
n2->AppendInput(graph.zone(), n0);
- CHECK_EQ(0, n3->UseCount());
- CHECK_EQ(3, n0->UseCount());
+ CHECK_INPUTS(n2, n0, n1, n0);
+ CHECK_USES(n1, n2);
+ CHECK_USES(n0, n2, n1, n2);
+
n0->ReplaceUses(n3);
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(3, n3->UseCount());
-
- Node::Uses uses(n3->uses());
- auto current = uses.begin();
- CHECK(current != uses.end());
- CHECK(*current == n1);
- ++current;
- CHECK(current != uses.end());
- CHECK(*current == n2);
- ++current;
- CHECK(current != uses.end());
- CHECK(*current == n2);
- ++current;
- CHECK(current == uses.end());
+
+ CHECK_USES(n0, NONE);
+ CHECK_INPUTS(n2, n3, n1, n3);
+ CHECK_USES(n3, n2, n1, n2);
}
@@ -378,17 +447,16 @@ TEST(TrimInputCountInline) {
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator, n0);
n1->TrimInputCount(1);
- CHECK_EQ(1, n1->InputCount());
- CHECK_EQ(n0, n1->InputAt(0));
- CHECK_EQ(1, n0->UseCount());
+ CHECK_INPUTS(n1, n0);
+ CHECK_USES(n0, n1);
}
{
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator, n0);
n1->TrimInputCount(0);
- CHECK_EQ(0, n1->InputCount());
- CHECK_EQ(0, n0->UseCount());
+ CHECK_INPUTS(n1, NONE);
+ CHECK_USES(n0, NONE);
}
{
@@ -396,10 +464,9 @@ TEST(TrimInputCountInline) {
Node* n1 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
n2->TrimInputCount(2);
- CHECK_EQ(2, n2->InputCount());
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(1, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0, n1);
+ CHECK_USES(n0, n2);
+ CHECK_USES(n1, n2);
}
{
@@ -407,10 +474,9 @@ TEST(TrimInputCountInline) {
Node* n1 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
n2->TrimInputCount(1);
- CHECK_EQ(1, n2->InputCount());
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0);
+ CHECK_USES(n0, n2);
+ CHECK_USES(n1, NONE);
}
{
@@ -418,28 +484,25 @@ TEST(TrimInputCountInline) {
Node* n1 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
n2->TrimInputCount(0);
- CHECK_EQ(0, n2->InputCount());
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, NONE);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, NONE);
}
{
Node* n0 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
n2->TrimInputCount(1);
- CHECK_EQ(1, n2->InputCount());
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0);
+ CHECK_USES(n0, n2);
}
{
Node* n0 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
n2->TrimInputCount(0);
- CHECK_EQ(0, n2->InputCount());
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, NONE);
+ CHECK_USES(n0, NONE);
}
}
@@ -451,10 +514,12 @@ TEST(TrimInputCountOutOfLine1) {
Node* n0 = graph.NewNode(&dummy_operator);
Node* n1 = graph.NewNode(&dummy_operator);
n1->AppendInput(graph.zone(), n0);
+ CHECK_INPUTS(n1, n0);
+ CHECK_USES(n0, n1);
+
n1->TrimInputCount(1);
- CHECK_EQ(1, n1->InputCount());
- CHECK_EQ(n0, n1->InputAt(0));
- CHECK_EQ(1, n0->UseCount());
+ CHECK_INPUTS(n1, n0);
+ CHECK_USES(n0, n1);
}
{
@@ -473,14 +538,12 @@ TEST(TrimInputCountOutOfLine1) {
Node* n2 = graph.NewNode(&dummy_operator);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n1);
- CHECK_EQ(2, n2->InputCount());
+ CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(2);
- CHECK_EQ(2, n2->InputCount());
- CHECK_EQ(n0, n2->InputAt(0));
- CHECK_EQ(n1, n2->InputAt(1));
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(1, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0, n1);
+ CHECK_USES(n0, n2);
+ CHECK_USES(n1, n2);
+ CHECK_USES(n2, NONE);
}
{
@@ -489,13 +552,12 @@ TEST(TrimInputCountOutOfLine1) {
Node* n2 = graph.NewNode(&dummy_operator);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n1);
- CHECK_EQ(2, n2->InputCount());
+ CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(1);
- CHECK_EQ(1, n2->InputCount());
- CHECK_EQ(n0, n2->InputAt(0));
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0);
+ CHECK_USES(n0, n2);
+ CHECK_USES(n1, NONE);
+ CHECK_USES(n2, NONE);
}
{
@@ -504,12 +566,12 @@ TEST(TrimInputCountOutOfLine1) {
Node* n2 = graph.NewNode(&dummy_operator);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n1);
- CHECK_EQ(2, n2->InputCount());
+ CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(0);
- CHECK_EQ(0, n2->InputCount());
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, NONE);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, NONE);
+ CHECK_USES(n2, NONE);
}
{
@@ -517,12 +579,11 @@ TEST(TrimInputCountOutOfLine1) {
Node* n2 = graph.NewNode(&dummy_operator);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n0);
- CHECK_EQ(2, n2->InputCount());
- CHECK_EQ(2, n0->UseCount());
+ CHECK_INPUTS(n2, n0, n0);
+ CHECK_USES(n0, n2, n2);
n2->TrimInputCount(1);
- CHECK_EQ(1, n2->InputCount());
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0);
+ CHECK_USES(n0, n2);
}
{
@@ -530,12 +591,11 @@ TEST(TrimInputCountOutOfLine1) {
Node* n2 = graph.NewNode(&dummy_operator);
n2->AppendInput(graph.zone(), n0);
n2->AppendInput(graph.zone(), n0);
- CHECK_EQ(2, n2->InputCount());
- CHECK_EQ(2, n0->UseCount());
+ CHECK_INPUTS(n2, n0, n0);
+ CHECK_USES(n0, n2, n2);
n2->TrimInputCount(0);
- CHECK_EQ(0, n2->InputCount());
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, NONE);
+ CHECK_USES(n0, NONE);
}
}
@@ -548,14 +608,12 @@ TEST(TrimInputCountOutOfLine2) {
Node* n1 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0);
n2->AppendInput(graph.zone(), n1);
- CHECK_EQ(2, n2->InputCount());
+ CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(2);
- CHECK_EQ(2, n2->InputCount());
- CHECK_EQ(n0, n2->InputAt(0));
- CHECK_EQ(n1, n2->InputAt(1));
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(1, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0, n1);
+ CHECK_USES(n0, n2);
+ CHECK_USES(n1, n2);
+ CHECK_USES(n2, NONE);
}
{
@@ -563,13 +621,12 @@ TEST(TrimInputCountOutOfLine2) {
Node* n1 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0);
n2->AppendInput(graph.zone(), n1);
- CHECK_EQ(2, n2->InputCount());
+ CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(1);
- CHECK_EQ(1, n2->InputCount());
- CHECK_EQ(n0, n2->InputAt(0));
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0);
+ CHECK_USES(n0, n2);
+ CHECK_USES(n1, NONE);
+ CHECK_USES(n2, NONE);
}
{
@@ -577,24 +634,24 @@ TEST(TrimInputCountOutOfLine2) {
Node* n1 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0);
n2->AppendInput(graph.zone(), n1);
- CHECK_EQ(2, n2->InputCount());
+ CHECK_INPUTS(n2, n0, n1);
n2->TrimInputCount(0);
- CHECK_EQ(0, n2->InputCount());
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, NONE);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, NONE);
+ CHECK_USES(n2, NONE);
}
{
Node* n0 = graph.NewNode(&dummy_operator);
Node* n2 = graph.NewNode(&dummy_operator, n0);
n2->AppendInput(graph.zone(), n0);
- CHECK_EQ(2, n2->InputCount());
- CHECK_EQ(2, n0->UseCount());
+ CHECK_INPUTS(n2, n0, n0);
+ CHECK_USES(n0, n2, n2);
n2->TrimInputCount(1);
- CHECK_EQ(1, n2->InputCount());
- CHECK_EQ(1, n0->UseCount());
- CHECK_EQ(0, n2->UseCount());
+ CHECK_INPUTS(n2, n0);
+ CHECK_USES(n0, n2);
+ CHECK_USES(n2, NONE);
}
{
@@ -611,7 +668,7 @@ TEST(TrimInputCountOutOfLine2) {
}
-TEST(RemoveAllInputs) {
+TEST(NullAllInputs) {
GraphTester graph;
for (int i = 0; i < 2; i++) {
@@ -620,27 +677,27 @@ TEST(RemoveAllInputs) {
Node* n2;
if (i == 0) {
n2 = graph.NewNode(&dummy_operator, n0, n1);
+ CHECK_INPUTS(n2, n0, n1);
} else {
n2 = graph.NewNode(&dummy_operator, n0);
+ CHECK_INPUTS(n2, n0);
n2->AppendInput(graph.zone(), n1); // with out-of-line input.
+ CHECK_INPUTS(n2, n0, n1);
}
- n0->RemoveAllInputs();
- CHECK_EQ(0, n0->InputCount());
+ n0->NullAllInputs();
+ CHECK_INPUTS(n0, NONE);
- CHECK_EQ(2, n0->UseCount());
- n1->RemoveAllInputs();
- CHECK_EQ(1, n1->InputCount());
- CHECK_EQ(1, n0->UseCount());
- CHECK(!n1->InputAt(0));
+ CHECK_USES(n0, n1, n2);
+ n1->NullAllInputs();
+ CHECK_INPUTS(n1, NULL);
+ CHECK_INPUTS(n2, n0, n1);
+ CHECK_USES(n0, n2);
- CHECK_EQ(1, n1->UseCount());
- n2->RemoveAllInputs();
- CHECK_EQ(2, n2->InputCount());
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(0, n1->UseCount());
- CHECK(!n2->InputAt(0));
- CHECK(!n2->InputAt(1));
+ n2->NullAllInputs();
+ CHECK_INPUTS(n1, NULL);
+ CHECK_INPUTS(n2, NULL, NULL);
+ CHECK_USES(n0, NONE);
}
{
@@ -648,11 +705,53 @@ TEST(RemoveAllInputs) {
Node* n1 = graph.NewNode(&dummy_operator, n0);
n1->ReplaceInput(0, n1); // self-reference.
- CHECK_EQ(0, n0->UseCount());
- CHECK_EQ(1, n1->UseCount());
- n1->RemoveAllInputs();
- CHECK_EQ(1, n1->InputCount());
- CHECK_EQ(0, n1->UseCount());
- CHECK(!n1->InputAt(0));
+ CHECK_INPUTS(n0, NONE);
+ CHECK_INPUTS(n1, n1);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, n1);
+ n1->NullAllInputs();
+
+ CHECK_INPUTS(n0, NONE);
+ CHECK_INPUTS(n1, NULL);
+ CHECK_USES(n0, NONE);
+ CHECK_USES(n1, NONE);
+ }
+}
+
+
+TEST(AppendAndTrim) {
+ GraphTester graph;
+
+ Node* nodes[] = {
+ graph.NewNode(&dummy_operator), graph.NewNode(&dummy_operator),
+ graph.NewNode(&dummy_operator), graph.NewNode(&dummy_operator),
+ graph.NewNode(&dummy_operator)};
+
+ int max = static_cast<int>(arraysize(nodes));
+
+ Node* last = graph.NewNode(&dummy_operator);
+
+ for (int i = 0; i < max; i++) {
+ last->AppendInput(graph.zone(), nodes[i]);
+ CheckInputs(last, nodes, i + 1);
+
+ for (int j = 0; j < max; j++) {
+ if (j <= i) CHECK_USES(nodes[j], last);
+ if (j > i) CHECK_USES(nodes[j], NONE);
+ }
+
+ CHECK_USES(last, NONE);
+ }
+
+ for (int i = max; i >= 0; i--) {
+ last->TrimInputCount(i);
+ CheckInputs(last, nodes, i);
+
+ for (int j = 0; j < i; j++) {
+ if (j < i) CHECK_USES(nodes[j], last);
+ if (j >= i) CHECK_USES(nodes[j], NONE);
+ }
+
+ CHECK_USES(last, NONE);
}
}
diff --git a/deps/v8/test/cctest/compiler/test-osr.cc b/deps/v8/test/cctest/compiler/test-osr.cc
index e3963901a5..d2171188f8 100644
--- a/deps/v8/test/cctest/compiler/test-osr.cc
+++ b/deps/v8/test/cctest/compiler/test-osr.cc
@@ -36,6 +36,8 @@ static int CheckInputs(Node* node, Node* i0 = NULL, Node* i1 = NULL,
static Operator kIntLt(IrOpcode::kInt32LessThan, Operator::kPure,
"Int32LessThan", 2, 0, 0, 1, 0, 0);
+static Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0,
+ 0, 1, 0, 0);
static const int kMaxOsrValues = 10;
@@ -122,7 +124,12 @@ class OsrDeconstructorTester : public HandleAndZoneScope {
CHECK(!nodes.IsLive(osr_normal_entry));
CHECK(!nodes.IsLive(osr_loop_entry));
// No dangling nodes should be left over.
- CHECK_EQ(0u, nodes.gray.size());
+ for (Node* const node : nodes.live) {
+ for (Node* const use : node->uses()) {
+ CHECK(std::find(nodes.live.begin(), nodes.live.end(), use) !=
+ nodes.live.end());
+ }
+ }
}
};
@@ -484,3 +491,105 @@ TEST(Deconstruct_osr_nested2) {
CheckInputs(new_outer_phi, new_entry_phi, new_inner_phi,
T.jsgraph.ZeroConstant(), new_outer_loop);
}
+
+
+Node* MakeCounter(JSGraph* jsgraph, Node* start, Node* loop) {
+ int count = loop->InputCount();
+ NodeVector tmp_inputs(jsgraph->graph()->zone());
+ for (int i = 0; i < count; i++) {
+ tmp_inputs.push_back(start);
+ }
+ tmp_inputs.push_back(loop);
+
+ Node* phi = jsgraph->graph()->NewNode(
+ jsgraph->common()->Phi(kMachInt32, count), count + 1, &tmp_inputs[0]);
+ Node* inc = jsgraph->graph()->NewNode(&kIntAdd, phi, jsgraph->OneConstant());
+
+ for (int i = 1; i < count; i++) {
+ phi->ReplaceInput(i, inc);
+ }
+ return phi;
+}
+
+
+TEST(Deconstruct_osr_nested3) {
+ OsrDeconstructorTester T(1);
+
+ // outermost loop.
+ While loop0(T, T.p0, false, 1);
+ Node* loop0_cntr = MakeCounter(&T.jsgraph, T.p0, loop0.loop);
+ loop0.branch->ReplaceInput(0, loop0_cntr);
+
+ // middle loop.
+ Node* loop1 = T.graph.NewNode(T.common.Loop(2), loop0.if_true, T.self);
+ loop1->ReplaceInput(0, loop0.if_true);
+ Node* loop1_phi =
+ T.graph.NewNode(T.common.Phi(kMachAnyTagged, 2), loop0_cntr, loop0_cntr);
+
+ // innermost (OSR) loop.
+ While loop2(T, T.p0, true, 1);
+ loop2.loop->ReplaceInput(0, loop1);
+
+ Node* loop2_cntr = MakeCounter(&T.jsgraph, loop1_phi, loop2.loop);
+ loop2_cntr->ReplaceInput(1, T.osr_values[0]);
+ Node* osr_phi = loop2_cntr;
+ Node* loop2_inc = loop2_cntr->InputAt(2);
+ loop2.branch->ReplaceInput(0, loop2_cntr);
+
+ loop1_phi->ReplaceInput(1, loop2_cntr);
+ loop0_cntr->ReplaceInput(1, loop2_cntr);
+
+ // Branch to either the outer or middle loop.
+ Node* branch = T.graph.NewNode(T.common.Branch(), loop2_cntr, loop2.exit);
+ Node* if_true = T.graph.NewNode(T.common.IfTrue(), branch);
+ Node* if_false = T.graph.NewNode(T.common.IfFalse(), branch);
+
+ loop0.loop->ReplaceInput(1, if_true);
+ loop1->ReplaceInput(1, if_false);
+
+ Node* ret =
+ T.graph.NewNode(T.common.Return(), loop0_cntr, T.start, loop0.exit);
+ Node* end = T.graph.NewNode(T.common.End(), ret);
+ T.graph.SetEnd(end);
+
+ T.DeconstructOsr();
+
+ // Check structure of deconstructed graph.
+ // Check loop2 (OSR loop) is directly connected to start.
+ CheckInputs(loop2.loop, T.start, loop2.if_true);
+ CheckInputs(osr_phi, T.osr_values[0], loop2_inc, loop2.loop);
+ CheckInputs(loop2.branch, osr_phi, loop2.loop);
+ CheckInputs(loop2.if_true, loop2.branch);
+ CheckInputs(loop2.exit, loop2.branch);
+ CheckInputs(branch, osr_phi, loop2.exit);
+ CheckInputs(if_true, branch);
+ CheckInputs(if_false, branch);
+
+ // Check structure of new_loop1.
+ Node* new_loop1_loop = FindSuccessor(if_false, IrOpcode::kLoop);
+ // TODO(titzer): check the internal copy of loop2.
+ USE(new_loop1_loop);
+
+ // Check structure of new_loop0.
+ Node* new_loop0_loop_entry = FindSuccessor(if_true, IrOpcode::kMerge);
+ Node* new_loop0_loop = FindSuccessor(new_loop0_loop_entry, IrOpcode::kLoop);
+ // TODO(titzer): check the internal copies of loop1 and loop2.
+
+ Node* new_loop0_branch = FindSuccessor(new_loop0_loop, IrOpcode::kBranch);
+ Node* new_loop0_if_true = FindSuccessor(new_loop0_branch, IrOpcode::kIfTrue);
+ Node* new_loop0_exit = FindSuccessor(new_loop0_branch, IrOpcode::kIfFalse);
+
+ USE(new_loop0_if_true);
+
+ Node* new_ret = T.graph.end()->InputAt(0);
+ CHECK_EQ(IrOpcode::kReturn, new_ret->opcode());
+
+ Node* new_loop0_phi = new_ret->InputAt(0);
+ CHECK_EQ(IrOpcode::kPhi, new_loop0_phi->opcode());
+ CHECK_EQ(new_loop0_loop, NodeProperties::GetControlInput(new_loop0_phi));
+ CHECK_EQ(new_loop0_phi, FindSuccessor(new_loop0_loop, IrOpcode::kPhi));
+
+ // Check that the return returns the phi from the OSR loop and control
+ // depends on the copy of the outer loop0.
+ CheckInputs(new_ret, new_loop0_phi, T.graph.start(), new_loop0_exit);
+}
diff --git a/deps/v8/test/cctest/compiler/test-pipeline.cc b/deps/v8/test/cctest/compiler/test-pipeline.cc
index 98b0baeefd..b67af6ecf7 100644
--- a/deps/v8/test/cctest/compiler/test-pipeline.cc
+++ b/deps/v8/test/cctest/compiler/test-pipeline.cc
@@ -17,13 +17,13 @@ using namespace v8::internal;
using namespace v8::internal::compiler;
TEST(PipelineAdd) {
- InitializedHandleScope handles;
+ HandleAndZoneScope handles;
const char* source = "(function(a,b) { return a + b; })";
Handle<JSFunction> function = v8::Utils::OpenHandle(
*v8::Handle<v8::Function>::Cast(CompileRun(source)));
- CompilationInfoWithZone info(function);
-
- CHECK(Compiler::ParseAndAnalyze(&info));
+ ParseInfo parse_info(handles.main_zone(), function);
+ CHECK(Compiler::ParseAndAnalyze(&parse_info));
+ CompilationInfo info(&parse_info);
Pipeline pipeline(&info);
#if V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-run-inlining.cc b/deps/v8/test/cctest/compiler/test-run-inlining.cc
index 19b96bad50..a6d76e4d57 100644
--- a/deps/v8/test/cctest/compiler/test-run-inlining.cc
+++ b/deps/v8/test/cctest/compiler/test-run-inlining.cc
@@ -11,9 +11,11 @@
using namespace v8::internal;
using namespace v8::internal::compiler;
+namespace {
+
// Helper to determine inline count via JavaScriptFrame::GetInlineCount.
// Note that a count of 1 indicates that no inlining has occured.
-static void AssertInlineCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
+void AssertInlineCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
StackTraceFrameIterator it(CcTest::i_isolate());
int frames_seen = 0;
JavaScriptFrame* topmost = it.frame();
@@ -30,7 +32,7 @@ static void AssertInlineCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
-static void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
+void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
v8::Local<v8::Context> context = isolate->GetCurrentContext();
v8::Local<v8::FunctionTemplate> t =
v8::FunctionTemplate::New(isolate, AssertInlineCount);
@@ -38,9 +40,15 @@ static void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
}
-static uint32_t kInlineFlags = CompilationInfo::kInliningEnabled |
- CompilationInfo::kContextSpecializing |
- CompilationInfo::kTypingEnabled;
+const uint32_t kBuiltinInlineFlags = CompilationInfo::kBuiltinInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled;
+
+const uint32_t kInlineFlags = CompilationInfo::kInliningEnabled |
+ CompilationInfo::kContextSpecializing |
+ CompilationInfo::kTypingEnabled;
+
+} // namespace
TEST(SimpleInlining) {
@@ -320,6 +328,53 @@ TEST(InlineLoopGuardedTwice) {
}
+TEST(InlineLoopUnguardedEmpty) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ " function foo(s) { AssertInlineCount(2); while (s); return s; };"
+ " function bar(s, t) { return foo(s); };"
+ " return bar;"
+ "})();",
+ kInlineFlags);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(0.0), T.Val(0.0), T.Val(4));
+}
+
+
+TEST(InlineLoopUnguardedOnce) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ " function foo(s) { AssertInlineCount(2); while (s) {"
+ " s = s - 1; }; return s; };"
+ " function bar(s, t) { return foo(s); };"
+ " return bar;"
+ "})();",
+ kInlineFlags);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(0.0), T.Val(0.0), T.Val(4));
+}
+
+
+TEST(InlineLoopUnguardedTwice) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ " function foo(s) { AssertInlineCount(2); while (s > 0) {"
+ " s = s - 1; }; return s; };"
+ " function bar(s,t) { return foo(foo(s,t),t); };"
+ " return bar;"
+ "})();",
+ kInlineFlags);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.Val(0.0), T.Val(0.0), T.Val(4));
+}
+
+
TEST(InlineStrictIntoNonStrict) {
FLAG_turbo_deoptimization = true;
FunctionTester T(
@@ -437,4 +492,38 @@ TEST(InlineWithArguments) {
T.CheckCall(T.true_value(), T.Val(12), T.Val(14));
}
+
+TEST(InlineBuiltin) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ " function foo(s,t,u) { AssertInlineCount(2); return true; }"
+ " function bar() { return foo(); };"
+ " %SetInlineBuiltinFlag(foo);"
+ " return bar;"
+ "})();",
+ kBuiltinInlineFlags);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.true_value());
+}
+
+
+TEST(InlineNestedBuiltin) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function () {"
+ " function foo(s,t,u) { AssertInlineCount(3); return true; }"
+ " function baz(s,t,u) { return foo(s,t,u); }"
+ " function bar() { return baz(); };"
+ " %SetInlineBuiltinFlag(foo);"
+ " %SetInlineBuiltinFlag(baz);"
+ " return bar;"
+ "})();",
+ kBuiltinInlineFlags);
+
+ InstallAssertInlineCountHelper(CcTest::isolate());
+ T.CheckCall(T.true_value());
+}
+
#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
index bd4038ed91..7fc5cc9758 100644
--- a/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
+++ b/deps/v8/test/cctest/compiler/test-run-intrinsics.cc
@@ -10,29 +10,98 @@ using namespace v8::internal;
using namespace v8::internal::compiler;
uint32_t flags = CompilationInfo::kInliningEnabled;
-TEST(IsSmi) {
+
+TEST(CallFunction) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a) { return %_IsSmi(a); })", flags);
+ FunctionTester T("(function(a,b) { return %_CallFunction(a, 1, 2, 3, b); })",
+ flags);
+ CompileRun("function f(a,b,c) { return a + b + c + this.d; }");
- T.CheckTrue(T.Val(1));
- T.CheckFalse(T.Val(1.1));
- T.CheckFalse(T.Val(-0.0));
- T.CheckTrue(T.Val(-2));
- T.CheckFalse(T.Val(-2.3));
+ T.CheckCall(T.Val(129), T.NewObject("({d:123})"), T.NewObject("f"));
+ T.CheckCall(T.Val("6x"), T.NewObject("({d:'x'})"), T.NewObject("f"));
+}
+
+
+TEST(ClassOf) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T("(function(a) { return %_ClassOf(a); })", flags);
+
+ T.CheckCall(T.Val("Function"), T.NewObject("(function() {})"));
+ T.CheckCall(T.Val("Array"), T.NewObject("([1])"));
+ T.CheckCall(T.Val("Object"), T.NewObject("({})"));
+ T.CheckCall(T.Val("RegExp"), T.NewObject("(/x/)"));
+ T.CheckCall(T.null(), T.undefined());
+ T.CheckCall(T.null(), T.null());
+ T.CheckCall(T.null(), T.Val("x"));
+ T.CheckCall(T.null(), T.Val(1));
+}
+
+
+TEST(HeapObjectGetMap) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T("(function(a) { return %_HeapObjectGetMap(a); })", flags);
+
+ Factory* factory = T.main_isolate()->factory();
+ T.CheckCall(factory->null_map(), T.null());
+ T.CheckCall(factory->undefined_map(), T.undefined());
+ T.CheckCall(factory->heap_number_map(), T.Val(3.1415));
+ T.CheckCall(factory->symbol_map(), factory->NewSymbol());
+}
+
+
+#define COUNTER_NAME "hurz"
+
+static int* LookupCounter(const char* name) {
+ static int counter = 1234;
+ return strcmp(name, COUNTER_NAME) == 0 ? &counter : nullptr;
+}
+
+
+TEST(IncrementStatsCounter) {
+ FLAG_turbo_deoptimization = true;
+ FLAG_native_code_counters = true;
+ reinterpret_cast<v8::Isolate*>(CcTest::InitIsolateOnce())
+ ->SetCounterFunction(LookupCounter);
+ FunctionTester T(
+ "(function() { %_IncrementStatsCounter('" COUNTER_NAME "'); })", flags);
+ StatsCounter counter(T.main_isolate(), COUNTER_NAME);
+ if (!counter.Enabled()) return;
+
+ int old_value = *counter.GetInternalPointer();
+ T.CheckCall(T.undefined());
+ CHECK_EQ(old_value + 1, *counter.GetInternalPointer());
+}
+
+#undef COUNTER_NAME
+
+
+TEST(IsArray) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T("(function(a) { return %_IsArray(a); })", flags);
+
+ T.CheckFalse(T.NewObject("(function() {})"));
+ T.CheckTrue(T.NewObject("([1])"));
+ T.CheckFalse(T.NewObject("({})"));
+ T.CheckFalse(T.NewObject("(/x/)"));
T.CheckFalse(T.undefined());
+ T.CheckFalse(T.null());
+ T.CheckFalse(T.Val("x"));
+ T.CheckFalse(T.Val(1));
}
-TEST(IsNonNegativeSmi) {
+TEST(IsFunction) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a) { return %_IsNonNegativeSmi(a); })", flags);
+ FunctionTester T("(function(a) { return %_IsFunction(a); })", flags);
- T.CheckTrue(T.Val(1));
- T.CheckFalse(T.Val(1.1));
- T.CheckFalse(T.Val(-0.0));
- T.CheckFalse(T.Val(-2));
- T.CheckFalse(T.Val(-2.3));
+ T.CheckTrue(T.NewObject("(function() {})"));
+ T.CheckFalse(T.NewObject("([1])"));
+ T.CheckFalse(T.NewObject("({})"));
+ T.CheckFalse(T.NewObject("(/x/)"));
T.CheckFalse(T.undefined());
+ T.CheckFalse(T.null());
+ T.CheckFalse(T.Val("x"));
+ T.CheckFalse(T.Val(1));
}
@@ -49,18 +118,16 @@ TEST(IsMinusZero) {
}
-TEST(IsArray) {
+TEST(IsNonNegativeSmi) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a) { return %_IsArray(a); })", flags);
+ FunctionTester T("(function(a) { return %_IsNonNegativeSmi(a); })", flags);
- T.CheckFalse(T.NewObject("(function() {})"));
- T.CheckTrue(T.NewObject("([1])"));
- T.CheckFalse(T.NewObject("({})"));
- T.CheckFalse(T.NewObject("(/x/)"));
+ T.CheckTrue(T.Val(1));
+ T.CheckFalse(T.Val(1.1));
+ T.CheckFalse(T.Val(-0.0));
+ T.CheckFalse(T.Val(-2));
+ T.CheckFalse(T.Val(-2.3));
T.CheckFalse(T.undefined());
- T.CheckFalse(T.null());
- T.CheckFalse(T.Val("x"));
- T.CheckFalse(T.Val(1));
}
@@ -79,14 +146,14 @@ TEST(IsObject) {
}
-TEST(IsFunction) {
+TEST(IsRegExp) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a) { return %_IsFunction(a); })", flags);
+ FunctionTester T("(function(a) { return %_IsRegExp(a); })", flags);
- T.CheckTrue(T.NewObject("(function() {})"));
+ T.CheckFalse(T.NewObject("(function() {})"));
T.CheckFalse(T.NewObject("([1])"));
T.CheckFalse(T.NewObject("({})"));
- T.CheckFalse(T.NewObject("(/x/)"));
+ T.CheckTrue(T.NewObject("(/x/)"));
T.CheckFalse(T.undefined());
T.CheckFalse(T.null());
T.CheckFalse(T.Val("x"));
@@ -94,33 +161,30 @@ TEST(IsFunction) {
}
-TEST(IsRegExp) {
+TEST(IsSmi) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a) { return %_IsRegExp(a); })", flags);
+ FunctionTester T("(function(a) { return %_IsSmi(a); })", flags);
- T.CheckFalse(T.NewObject("(function() {})"));
- T.CheckFalse(T.NewObject("([1])"));
- T.CheckFalse(T.NewObject("({})"));
- T.CheckTrue(T.NewObject("(/x/)"));
+ T.CheckTrue(T.Val(1));
+ T.CheckFalse(T.Val(1.1));
+ T.CheckFalse(T.Val(-0.0));
+ T.CheckTrue(T.Val(-2));
+ T.CheckFalse(T.Val(-2.3));
T.CheckFalse(T.undefined());
- T.CheckFalse(T.null());
- T.CheckFalse(T.Val("x"));
- T.CheckFalse(T.Val(1));
}
-TEST(ClassOf) {
+TEST(MapGetInstanceType) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a) { return %_ClassOf(a); })", flags);
-
- T.CheckCall(T.Val("Function"), T.NewObject("(function() {})"));
- T.CheckCall(T.Val("Array"), T.NewObject("([1])"));
- T.CheckCall(T.Val("Object"), T.NewObject("({})"));
- T.CheckCall(T.Val("RegExp"), T.NewObject("(/x/)"));
- T.CheckCall(T.null(), T.undefined());
- T.CheckCall(T.null(), T.null());
- T.CheckCall(T.null(), T.Val("x"));
- T.CheckCall(T.null(), T.Val(1));
+ FunctionTester T(
+ "(function(a) { return %_MapGetInstanceType(%_HeapObjectGetMap(a)); })",
+ flags);
+
+ Factory* factory = T.main_isolate()->factory();
+ T.CheckCall(T.Val(ODDBALL_TYPE), T.null());
+ T.CheckCall(T.Val(ODDBALL_TYPE), T.undefined());
+ T.CheckCall(T.Val(HEAP_NUMBER_TYPE), T.Val(3.1415));
+ T.CheckCall(T.Val(SYMBOL_TYPE), factory->NewSymbol());
}
@@ -138,14 +202,48 @@ TEST(ObjectEquals) {
}
-TEST(ValueOf) {
+TEST(OneByteSeqStringGetChar) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a) { return %_ValueOf(a); })", flags);
+ FunctionTester T("(function(a,b) { return %_OneByteSeqStringGetChar(a,b); })",
+ flags);
- T.CheckCall(T.Val("a"), T.Val("a"));
- T.CheckCall(T.Val("b"), T.NewObject("(new String('b'))"));
- T.CheckCall(T.Val(123), T.Val(123));
- T.CheckCall(T.Val(456), T.NewObject("(new Number(456))"));
+ Handle<SeqOneByteString> string =
+ T.main_isolate()->factory()->NewRawOneByteString(3).ToHandleChecked();
+ string->SeqOneByteStringSet(0, 'b');
+ string->SeqOneByteStringSet(1, 'a');
+ string->SeqOneByteStringSet(2, 'r');
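+ // Index 0 is passed as a double (0.0) and the rest as smis, presumably to
+ // exercise both tagged index representations.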
+ T.CheckCall(T.Val('b'), string, T.Val(0.0));
+ T.CheckCall(T.Val('a'), string, T.Val(1));
+ T.CheckCall(T.Val('r'), string, T.Val(2));
+}
+
+
+TEST(OneByteSeqStringSetChar) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T("(function(a,b) { %_OneByteSeqStringSetChar(a,88,b); })",
+ flags);
+
+ Handle<SeqOneByteString> string =
+ T.main_isolate()->factory()->NewRawOneByteString(3).ToHandleChecked();
+ string->SeqOneByteStringSet(0, 'b');
+ string->SeqOneByteStringSet(1, 'a');
+ string->SeqOneByteStringSet(2, 'r');
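+ // Write 'X' (character code 88) at index 1; the call's return value is ignored.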
+ T.Call(T.Val(1), string);
+ CHECK_EQ('b', string->SeqOneByteStringGet(0));
+ CHECK_EQ('X', string->SeqOneByteStringGet(1));
+ CHECK_EQ('r', string->SeqOneByteStringGet(2));
+}
+
+
+TEST(NewConsString) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T(
+ "(function() { "
+ " return %_NewConsString(14, true, 'abcdefghi', 'jklmn');"
+ " })",
+ flags);
+
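+ // %_NewConsString appears to take (length, is_one_byte, left, right); 14 == 9 + 5.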
+ T.CheckCall(T.Val("abcdefghijklmn"));
}
@@ -159,13 +257,13 @@ TEST(SetValueOf) {
}
-TEST(StringCharFromCode) {
+TEST(StringAdd) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a) { return %_StringCharFromCode(a); })", flags);
+ FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })", flags);
- T.CheckCall(T.Val("a"), T.Val(97));
- T.CheckCall(T.Val("\xE2\x9D\x8A"), T.Val(0x274A));
- T.CheckCall(T.Val(""), T.undefined());
+ T.CheckCall(T.Val("aaabbb"), T.Val("aaa"), T.Val("bbb"));
+ T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(""));
+ T.CheckCall(T.Val("bbb"), T.Val(""), T.Val("bbb"));
}
@@ -190,17 +288,27 @@ TEST(StringCharCodeAt) {
}
-TEST(StringAdd) {
+TEST(StringCharFromCode) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })", flags);
+ FunctionTester T("(function(a) { return %_StringCharFromCode(a); })", flags);
- T.CheckCall(T.Val("aaabbb"), T.Val("aaa"), T.Val("bbb"));
- T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(""));
- T.CheckCall(T.Val("bbb"), T.Val(""), T.Val("bbb"));
+ T.CheckCall(T.Val("a"), T.Val(97));
+ T.CheckCall(T.Val("\xE2\x9D\x8A"), T.Val(0x274A));
+ T.CheckCall(T.Val(""), T.undefined());
+}
+
+
+TEST(StringCompare) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })", flags);
+
+ T.CheckCall(T.Val(-1), T.Val("aaa"), T.Val("bbb"));
+ T.CheckCall(T.Val(0.0), T.Val("bbb"), T.Val("bbb"));
+ T.CheckCall(T.Val(+1), T.Val("ccc"), T.Val("bbb"));
}
-TEST(StringSubString) {
+TEST(SubString) {
FLAG_turbo_deoptimization = true;
FunctionTester T("(function(a,b) { return %_SubString(a,b,b+3); })", flags);
@@ -210,22 +318,45 @@ TEST(StringSubString) {
}
-TEST(StringCompare) {
+TEST(TwoByteSeqStringGetChar) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })", flags);
+ FunctionTester T("(function(a,b) { return %_TwoByteSeqStringGetChar(a,b); })",
+ flags);
- T.CheckCall(T.Val(-1), T.Val("aaa"), T.Val("bbb"));
- T.CheckCall(T.Val(0.0), T.Val("bbb"), T.Val("bbb"));
- T.CheckCall(T.Val(+1), T.Val("ccc"), T.Val("bbb"));
+ Handle<SeqTwoByteString> string =
+ T.main_isolate()->factory()->NewRawTwoByteString(3).ToHandleChecked();
+ string->SeqTwoByteStringSet(0, 'b');
+ string->SeqTwoByteStringSet(1, 'a');
+ string->SeqTwoByteStringSet(2, 'r');
+ T.CheckCall(T.Val('b'), string, T.Val(0.0));
+ T.CheckCall(T.Val('a'), string, T.Val(1));
+ T.CheckCall(T.Val('r'), string, T.Val(2));
}
-TEST(CallFunction) {
+TEST(TwoByteSeqStringSetChar) {
FLAG_turbo_deoptimization = true;
- FunctionTester T("(function(a,b) { return %_CallFunction(a, 1, 2, 3, b); })",
+ FunctionTester T("(function(a,b) { %_TwoByteSeqStringSetChar(a,88,b); })",
flags);
- CompileRun("function f(a,b,c) { return a + b + c + this.d; }");
- T.CheckCall(T.Val(129), T.NewObject("({d:123})"), T.NewObject("f"));
- T.CheckCall(T.Val("6x"), T.NewObject("({d:'x'})"), T.NewObject("f"));
+ Handle<SeqTwoByteString> string =
+ T.main_isolate()->factory()->NewRawTwoByteString(3).ToHandleChecked();
+ string->SeqTwoByteStringSet(0, 'b');
+ string->SeqTwoByteStringSet(1, 'a');
+ string->SeqTwoByteStringSet(2, 'r');
+ T.Call(T.Val(1), string);
+ CHECK_EQ('b', string->SeqTwoByteStringGet(0));
+ CHECK_EQ('X', string->SeqTwoByteStringGet(1));
+ CHECK_EQ('r', string->SeqTwoByteStringGet(2));
+}
+
+
+TEST(ValueOf) {
+ FLAG_turbo_deoptimization = true;
+ FunctionTester T("(function(a) { return %_ValueOf(a); })", flags);
+
+ T.CheckCall(T.Val("a"), T.Val("a"));
+ T.CheckCall(T.Val("b"), T.NewObject("(new String('b'))"));
+ T.CheckCall(T.Val(123), T.Val(123));
+ T.CheckCall(T.Val(456), T.NewObject("(new Number(456))"));
}
diff --git a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
index 74990daac9..f06dc5f315 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsexceptions.cc
@@ -18,7 +18,8 @@ TEST(Throw) {
}
-TEST(ThrowSourcePosition) {
+TEST(ThrowMessagePosition) {
+ i::FLAG_turbo_exceptions = true;
static const char* src =
"(function(a, b) { \n"
" if (a == 1) throw 1; \n"
@@ -30,22 +31,57 @@ TEST(ThrowSourcePosition) {
v8::Handle<v8::Message> message;
message = T.CheckThrowsReturnMessage(T.Val(1), T.undefined());
- CHECK(!message.IsEmpty());
CHECK_EQ(2, message->GetLineNumber());
CHECK_EQ(40, message->GetStartPosition());
message = T.CheckThrowsReturnMessage(T.Val(2), T.undefined());
- CHECK(!message.IsEmpty());
CHECK_EQ(3, message->GetLineNumber());
CHECK_EQ(67, message->GetStartPosition());
message = T.CheckThrowsReturnMessage(T.Val(3), T.undefined());
- CHECK(!message.IsEmpty());
CHECK_EQ(4, message->GetLineNumber());
CHECK_EQ(95, message->GetStartPosition());
}
+TEST(ThrowMessageDirectly) {
+ i::FLAG_turbo_exceptions = true;
+ static const char* src =
+ "(function(a, b) {"
+ " if (a) { throw b; } else { throw new Error(b); }"
+ "})";
+ FunctionTester T(src);
+ v8::Handle<v8::Message> message;
+
+ message = T.CheckThrowsReturnMessage(T.false_value(), T.Val("Wat?"));
+ CHECK(message->Get()->Equals(v8_str("Uncaught Error: Wat?")));
+
+ message = T.CheckThrowsReturnMessage(T.true_value(), T.Val("Kaboom!"));
+ CHECK(message->Get()->Equals(v8_str("Uncaught Kaboom!")));
+}
+
+
+TEST(ThrowMessageIndirectly) {
+ i::FLAG_turbo_exceptions = true;
+ static const char* src =
+ "(function(a, b) {"
+ " try {"
+ " if (a) { throw b; } else { throw new Error(b); }"
+ " } finally {"
+ " try { throw 'clobber'; } catch (e) { 'unclobber'; }"
+ " }"
+ "})";
+ FunctionTester T(src);
+ v8::Handle<v8::Message> message;
+
+ message = T.CheckThrowsReturnMessage(T.false_value(), T.Val("Wat?"));
+ CHECK(message->Get()->Equals(v8_str("Uncaught Error: Wat?")));
+
+ message = T.CheckThrowsReturnMessage(T.true_value(), T.Val("Kaboom!"));
+ CHECK(message->Get()->Equals(v8_str("Uncaught Kaboom!")));
+}
+
+
// TODO(mstarzinger): Increase test coverage by having similar tests within the
// mjsunit suite to also test integration with other components (e.g. OSR).
@@ -118,6 +154,28 @@ TEST(CatchBreak) {
}
+TEST(CatchCall) {
+ i::FLAG_turbo_exceptions = true;
+ const char* src =
+ "(function(fun) {"
+ " var r = '-';"
+ " try {"
+ " r += 'A-';"
+ " return r + 'B-' + fun();"
+ " } catch (e) {"
+ " r += e;"
+ " }"
+ " return r;"
+ "})";
+ FunctionTester T(src);
+
+ CompileRun("function thrower() { throw 'T-'; }");
+ T.CheckCall(T.Val("-A-T-"), T.NewFunction("thrower"));
+ CompileRun("function returner() { return 'R-'; }");
+ T.CheckCall(T.Val("-A-B-R-"), T.NewFunction("returner"));
+}
+
+
TEST(Finally) {
i::FLAG_turbo_exceptions = true;
const char* src =
@@ -158,3 +216,76 @@ TEST(FinallyBreak) {
T.CheckCall(T.Val("-A-B-D-"), T.false_value(), T.true_value());
T.CheckCall(T.Val("-A-B-C-D-"), T.false_value(), T.false_value());
}
+
+
+TEST(DeoptTry) {
+ i::FLAG_turbo_exceptions = true;
+ i::FLAG_turbo_deoptimization = true;
+ const char* src =
+ "(function f(a) {"
+ " try {"
+ " %DeoptimizeFunction(f);"
+ " throw a;"
+ " } catch (e) {"
+ " return e + 1;"
+ " }"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val(2), T.Val(1));
+}
+
+
+TEST(DeoptCatch) {
+ i::FLAG_turbo_exceptions = true;
+ i::FLAG_turbo_deoptimization = true;
+ const char* src =
+ "(function f(a) {"
+ " try {"
+ " throw a;"
+ " } catch (e) {"
+ " %DeoptimizeFunction(f);"
+ " return e + 1;"
+ " }"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val(2), T.Val(1));
+}
+
+
+TEST(DeoptFinallyReturn) {
+ i::FLAG_turbo_exceptions = true;
+ i::FLAG_turbo_deoptimization = true;
+ const char* src =
+ "(function f(a) {"
+ " try {"
+ " throw a;"
+ " } finally {"
+ " %DeoptimizeFunction(f);"
+ " return a + 1;"
+ " }"
+ "})";
+ FunctionTester T(src);
+
+ T.CheckCall(T.Val(2), T.Val(1));
+}
+
+
+TEST(DeoptFinallyReThrow) {
+ i::FLAG_turbo_exceptions = true;
+ i::FLAG_turbo_deoptimization = true;
+ const char* src =
+ "(function f(a) {"
+ " try {"
+ " throw a;"
+ " } finally {"
+ " %DeoptimizeFunction(f);"
+ " }"
+ "})";
+ FunctionTester T(src);
+
+#if 0 // TODO(mstarzinger): Enable once we can.
+ T.CheckThrows(T.NewObject("new Error"), T.Val(1));
+#endif
+}
diff --git a/deps/v8/test/cctest/compiler/test-run-jsops.cc b/deps/v8/test/cctest/compiler/test-run-jsops.cc
index bb7c239a59..032db82db3 100644
--- a/deps/v8/test/cctest/compiler/test-run-jsops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-jsops.cc
@@ -451,7 +451,6 @@ TEST(LookupStore) {
TEST(BlockLoadStore) {
- FLAG_harmony_scoping = true;
FunctionTester T("(function(a) { 'use strict'; { let x = a+a; return x; }})");
T.CheckCall(T.Val(46), T.Val(23));
@@ -460,7 +459,6 @@ TEST(BlockLoadStore) {
TEST(BlockLoadStoreNested) {
- FLAG_harmony_scoping = true;
const char* src =
"(function(a,b) {"
"'use strict';"
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index 5a55ce6e23..102e6d8ad4 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -4436,6 +4436,26 @@ TEST(RunInt32SubWithOverflowInBranchP) {
}
+TEST(RunWord64EqualInBranchP) {
+ int64_t input;
+ MLabel blocka, blockb;
+ RawMachineAssemblerTester<int64_t> m;
+ if (!m.machine()->Is64()) return;
+ Node* value = m.LoadFromPointer(&input, kMachInt64);
+ m.Branch(m.Word64Equal(value, m.Int64Constant(0)), &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(2));
+ input = V8_INT64_C(0);
+ CHECK_EQ(1, m.Call());
+ input = V8_INT64_C(1);
+ CHECK_EQ(2, m.Call());
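+ // All low 32 bits of 0x100000000 are zero, so a compare that only looked at
+ // the low word would wrongly take the equal branch.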
+ input = V8_INT64_C(0x100000000);
+ CHECK_EQ(2, m.Call());
+}
+
+
TEST(RunChangeInt32ToInt64P) {
if (kPointerSize < 8) return;
int64_t actual = -1;
@@ -4627,6 +4647,72 @@ TEST(RunFloat32Constant) {
}
+TEST(RunFloat64ExtractLowWord32) {
+ uint64_t input = 0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.Return(m.Float64ExtractLowWord32(m.LoadFromPointer(&input, kMachFloat64)));
+ FOR_FLOAT64_INPUTS(i) {
+ input = bit_cast<uint64_t>(*i);
+ int32_t expected = bit_cast<int32_t>(static_cast<uint32_t>(input));
+ CHECK_EQ(expected, m.Call());
+ }
+}
+
+
+TEST(RunFloat64ExtractHighWord32) {
+ uint64_t input = 0;
+ RawMachineAssemblerTester<int32_t> m;
+ m.Return(m.Float64ExtractHighWord32(m.LoadFromPointer(&input, kMachFloat64)));
+ FOR_FLOAT64_INPUTS(i) {
+ input = bit_cast<uint64_t>(*i);
+ int32_t expected = bit_cast<int32_t>(static_cast<uint32_t>(input >> 32));
+ CHECK_EQ(expected, m.Call());
+ }
+}
+
+
+TEST(RunFloat64InsertLowWord32) {
+ uint64_t input = 0;
+ uint64_t result = 0;
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ m.StoreToPointer(
+ &result, kMachFloat64,
+ m.Float64InsertLowWord32(m.LoadFromPointer(&input, kMachFloat64),
+ m.Parameter(0)));
+ m.Return(m.Int32Constant(0));
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ input = bit_cast<uint64_t>(*i);
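+ // Expected: the double's bit pattern with its low 32 bits replaced by the
+ // int32 parameter.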
+ uint64_t expected = (input & ~(V8_UINT64_C(0xFFFFFFFF))) |
+ (static_cast<uint64_t>(bit_cast<uint32_t>(*j)));
+ CHECK_EQ(0, m.Call(*j));
+ CHECK_EQ(expected, result);
+ }
+ }
+}
+
+
+TEST(RunFloat64InsertHighWord32) {
+ uint64_t input = 0;
+ uint64_t result = 0;
+ RawMachineAssemblerTester<int32_t> m(kMachInt32);
+ m.StoreToPointer(
+ &result, kMachFloat64,
+ m.Float64InsertHighWord32(m.LoadFromPointer(&input, kMachFloat64),
+ m.Parameter(0)));
+ m.Return(m.Int32Constant(0));
+ FOR_FLOAT64_INPUTS(i) {
+ FOR_INT32_INPUTS(j) {
+ input = bit_cast<uint64_t>(*i);
+ uint64_t expected = (input & ~(V8_UINT64_C(0xFFFFFFFF) << 32)) |
+ (static_cast<uint64_t>(bit_cast<uint32_t>(*j)) << 32);
+ CHECK_EQ(0, m.Call(*j));
+ CHECK_EQ(expected, result);
+ }
+ }
+}
+
+
static double two_30 = 1 << 30; // 2^30 is a smi boundary.
static double two_52 = two_30 * (1 << 22); // 2^52 is a precision boundary.
static double kValues[] = {0.1,
@@ -4725,13 +4811,13 @@ static double kValues[] = {0.1,
-two_52 + 1 - 0.7};
-TEST(RunFloat64Floor) {
+TEST(RunFloat64RoundDown1) {
double input = -1.0;
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
- if (!m.machine()->HasFloat64Floor()) return;
+ if (!m.machine()->HasFloat64RoundDown()) return;
m.StoreToPointer(&result, kMachFloat64,
- m.Float64Floor(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Float64RoundDown(m.LoadFromPointer(&input, kMachFloat64)));
m.Return(m.Int32Constant(0));
for (size_t i = 0; i < arraysize(kValues); ++i) {
input = kValues[i];
@@ -4742,13 +4828,16 @@ TEST(RunFloat64Floor) {
}
-TEST(RunFloat64Ceil) {
+TEST(RunFloat64RoundDown2) {
double input = -1.0;
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
- if (!m.machine()->HasFloat64Ceil()) return;
+ if (!m.machine()->HasFloat64RoundDown()) return;
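+ // Computes Ceil(x) as -RoundDown(-x) (negating via subtraction from -0.0 to
+ // preserve signed zeros), so only RoundDown support is required.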
m.StoreToPointer(&result, kMachFloat64,
- m.Float64Ceil(m.LoadFromPointer(&input, kMachFloat64)));
+ m.Float64Sub(m.Float64Constant(-0.0),
+ m.Float64RoundDown(m.Float64Sub(
+ m.Float64Constant(-0.0),
+ m.LoadFromPointer(&input, kMachFloat64)))));
m.Return(m.Int32Constant(0));
for (size_t i = 0; i < arraysize(kValues); ++i) {
input = kValues[i];
@@ -4763,7 +4852,7 @@ TEST(RunFloat64RoundTruncate) {
double input = -1.0;
double result = 0.0;
RawMachineAssemblerTester<int32_t> m;
- if (!m.machine()->HasFloat64Ceil()) return;
+ if (!m.machine()->HasFloat64RoundTruncate()) return;
m.StoreToPointer(
&result, kMachFloat64,
m.Float64RoundTruncate(m.LoadFromPointer(&input, kMachFloat64)));
@@ -4793,4 +4882,5 @@ TEST(RunFloat64RoundTiesAway) {
CHECK_EQ(expected, result);
}
}
+
#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-run-stubs.cc b/deps/v8/test/cctest/compiler/test-run-stubs.cc
new file mode 100644
index 0000000000..daf9a461b1
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-run-stubs.cc
@@ -0,0 +1,106 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+#include "src/parser.h"
+#include "test/cctest/compiler/function-tester.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+
+static Handle<JSFunction> GetFunction(Isolate* isolate, const char* name) {
+ v8::ExtensionConfiguration no_extensions;
+ Handle<Context> ctx = isolate->bootstrapper()->CreateEnvironment(
+ MaybeHandle<JSGlobalProxy>(), v8::Handle<v8::ObjectTemplate>(),
+ &no_extensions);
+ Handle<JSBuiltinsObject> builtins = handle(ctx->builtins());
+ MaybeHandle<Object> fun = Object::GetProperty(isolate, builtins, name);
+ Handle<JSFunction> function = Handle<JSFunction>::cast(fun.ToHandleChecked());
+ // Just to make sure nobody calls this...
+ function->set_code(isolate->builtins()->builtin(Builtins::kIllegal));
+ return function;
+}
+
+
+class StringLengthStubTF : public CodeStub {
+ public:
+ explicit StringLengthStubTF(Isolate* isolate) : CodeStub(isolate) {}
+
+ StringLengthStubTF(uint32_t key, Isolate* isolate) : CodeStub(key, isolate) {}
+
+ CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+ return LoadDescriptor(isolate());
+ }
+
+ Handle<Code> GenerateCode() OVERRIDE {
+ Zone zone;
+ // Build a "hybrid" CompilationInfo for a JSFunction/CodeStub pair.
+ ParseInfo parse_info(&zone, GetFunction(isolate(), "STRING_LENGTH_STUB"));
+ CompilationInfo info(&parse_info);
+ info.SetStub(this);
+ // Run a "mini pipeline", extracted from compiler.cc.
+ CHECK(Parser::ParseStatic(info.parse_info()));
+ CHECK(Compiler::Analyze(info.parse_info()));
+ return Pipeline(&info).GenerateCode();
+ }
+
+ Major MajorKey() const OVERRIDE { return StringLength; }
+ Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
+ InlineCacheState GetICState() const OVERRIDE { return MONOMORPHIC; }
+ ExtraICState GetExtraICState() const OVERRIDE { return Code::LOAD_IC; }
+ Code::StubType GetStubType() const OVERRIDE { return Code::FAST; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringLengthStubTF);
+};
+
+
+TEST(RunStringLengthStubTF) {
+ HandleAndZoneScope scope;
+ Isolate* isolate = scope.main_isolate();
+ Zone* zone = scope.main_zone();
+
+ // Create code and an accompanying descriptor.
+ StringLengthStubTF stub(isolate);
+ Handle<Code> code = stub.GenerateCode();
+ CompilationInfo info(&stub, isolate, zone);
+ CallDescriptor* descriptor = Linkage::ComputeIncoming(zone, &info);
+
+ // Create a function to call the code using the descriptor.
+ Graph graph(zone);
+ CommonOperatorBuilder common(zone);
+ // FunctionTester (ab)uses a 2-argument function.
+ Node* start = graph.NewNode(common.Start(2));
+ // Parameter 0 is the receiver.
+ Node* receiverParam = graph.NewNode(common.Parameter(1), start);
+ Node* nameParam = graph.NewNode(common.Parameter(2), start);
+ Unique<HeapObject> u = Unique<HeapObject>::CreateImmovable(code);
+ Node* theCode = graph.NewNode(common.HeapConstant(u));
+ Node* dummyContext = graph.NewNode(common.NumberConstant(0.0));
+ Node* call = graph.NewNode(common.Call(descriptor), theCode, receiverParam,
+ nameParam, dummyContext, start, start);
+ Node* ret = graph.NewNode(common.Return(), call, call, start);
+ Node* end = graph.NewNode(common.End(), ret);
+ graph.SetStart(start);
+ graph.SetEnd(end);
+ FunctionTester ft(&graph);
+
+ // Actually call through to the stub, verifying its result.
+ const char* testString = "Und das Lamm schrie HURZ!";
+ Handle<JSReceiver> receiverArg =
+ Object::ToObject(isolate, ft.Val(testString)).ToHandleChecked();
+ Handle<String> nameArg = ft.Val("length");
+ Handle<Object> result = ft.Call(receiverArg, nameArg).ToHandleChecked();
+ CHECK_EQ(static_cast<int>(strlen(testString)), Smi::cast(*result)->value());
+}
+
+#endif // V8_TURBOFAN_TARGET
diff --git a/deps/v8/test/cctest/compiler/test-run-variables.cc b/deps/v8/test/cctest/compiler/test-run-variables.cc
index bf86e0d42c..4e5fd181f4 100644
--- a/deps/v8/test/cctest/compiler/test-run-variables.cc
+++ b/deps/v8/test/cctest/compiler/test-run-variables.cc
@@ -49,7 +49,6 @@ static const char* bind_tests[] = {
static void RunVariableTests(const char* source, const char* tests[]) {
- FLAG_harmony_scoping = true;
EmbeddedVector<char, 512> buffer;
for (int i = 0; tests[i] != NULL; i += 3) {
diff --git a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
index 6e2480e51e..634483bf24 100644
--- a/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
+++ b/deps/v8/test/cctest/compiler/test-simplified-lowering.cc
@@ -790,25 +790,6 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
};
-#if V8_TURBOFAN_TARGET
-
-TEST(LowerAnyToBoolean_tagged_tagged) {
- // AnyToBoolean(x: kRepTagged) used as kRepTagged
- TestingGraph t(Type::Any());
- Node* x = t.p0;
- Node* cnv = t.graph()->NewNode(t.simplified()->AnyToBoolean(), x);
- Node* use = t.Use(cnv, kRepTagged);
- t.Return(use);
- t.Lower();
- CHECK_EQ(IrOpcode::kCall, cnv->opcode());
- CHECK_EQ(IrOpcode::kHeapConstant, cnv->InputAt(0)->opcode());
- CHECK_EQ(x, cnv->InputAt(1));
- CHECK_EQ(t.jsgraph.NoContextConstant(), cnv->InputAt(2));
-}
-
-#endif
-
-
TEST(LowerBooleanNot_bit_bit) {
// BooleanNot(x: kRepBit) used as kRepBit
TestingGraph t(Type::Boolean());
@@ -1441,27 +1422,43 @@ TEST(LowerLoadField_to_load) {
TEST(LowerStoreField_to_store) {
- TestingGraph t(Type::Any(), Type::Signed32());
+ {
+ TestingGraph t(Type::Any(), Type::Signed32());
- for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachineReps[i]};
+ for (size_t i = 0; i < arraysize(kMachineReps); i++) {
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(), kMachineReps[i]};
- Node* val = t.ExampleWithOutput(kMachineReps[i]);
+ Node* val = t.ExampleWithOutput(kMachineReps[i]);
+ Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
+ val, t.start, t.start);
+ t.Effect(store);
+ t.Lower();
+ CHECK_EQ(IrOpcode::kStore, store->opcode());
+ CHECK_EQ(val, store->InputAt(2));
+ CheckFieldAccessArithmetic(access, store);
+
+ StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
+ if (kMachineReps[i] & kRepTagged) {
+ CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+ }
+ CHECK_EQ(kMachineReps[i], rep.machine_type());
+ }
+ }
+ {
+ TestingGraph t(Type::Any(),
+ Type::Intersect(Type::SignedSmall(), Type::TaggedSigned()));
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(), kMachAnyTagged};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
- val, t.start, t.start);
+ t.p1, t.start, t.start);
t.Effect(store);
t.Lower();
CHECK_EQ(IrOpcode::kStore, store->opcode());
- CHECK_EQ(val, store->InputAt(2));
- CheckFieldAccessArithmetic(access, store);
-
+ CHECK_EQ(t.p1, store->InputAt(2));
StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
- if (kMachineReps[i] & kRepTagged) {
- CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
- }
- CHECK_EQ(kMachineReps[i], rep.machine_type());
+ CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
}
@@ -1489,26 +1486,42 @@ TEST(LowerLoadElement_to_load) {
TEST(LowerStoreElement_to_store) {
- TestingGraph t(Type::Any(), Type::Signed32());
+ {
+ TestingGraph t(Type::Any(), Type::Signed32());
- for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), kMachineReps[i]};
+ for (size_t i = 0; i < arraysize(kMachineReps); i++) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Any(), kMachineReps[i]};
+
+ Node* val = t.ExampleWithOutput(kMachineReps[i]);
+ Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access),
+ t.p0, t.p1, val, t.start, t.start);
+ t.Effect(store);
+ t.Lower();
+ CHECK_EQ(IrOpcode::kStore, store->opcode());
+ CHECK_EQ(val, store->InputAt(2));
+ CheckElementAccessArithmetic(access, store);
- Node* val = t.ExampleWithOutput(kMachineReps[i]);
+ StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
+ if (kMachineReps[i] & kRepTagged) {
+ CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+ }
+ CHECK_EQ(kMachineReps[i], rep.machine_type());
+ }
+ }
+ {
+ TestingGraph t(Type::Any(), Type::Signed32(),
+ Type::Intersect(Type::SignedSmall(), Type::TaggedSigned()));
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Any(), kMachAnyTagged};
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
- t.p1, val, t.start, t.start);
+ t.p1, t.p2, t.start, t.start);
t.Effect(store);
t.Lower();
CHECK_EQ(IrOpcode::kStore, store->opcode());
- CHECK_EQ(val, store->InputAt(2));
- CheckElementAccessArithmetic(access, store);
-
+ CHECK_EQ(t.p2, store->InputAt(2));
StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
- if (kMachineReps[i] & kRepTagged) {
- CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
- }
- CHECK_EQ(kMachineReps[i], rep.machine_type());
+ CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
}
@@ -1938,10 +1951,10 @@ TEST(NumberModulus_TruncatingToUint32) {
Node* k = t.jsgraph.Constant(constants[i]);
Node* mod = t.graph()->NewNode(t.simplified()->NumberModulus(), t.p0, k);
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), mod);
- Node* ret = t.Return(trunc);
+ t.Return(trunc);
t.Lower();
- CHECK_EQ(IrOpcode::kUint32Mod, ret->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kUint32Mod, t.ret->InputAt(0)->InputAt(0)->opcode());
}
}
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index bbb74c0a71..be69111364 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -605,3 +605,86 @@ THREADED_TEST(Regress433458) {
"Object.defineProperty(obj, 'prop', { writable: false });"
"Object.defineProperty(obj, 'prop', { writable: true });");
}
+
+
+static bool security_check_value = false;
+
+
+static bool SecurityTestCallback(Local<v8::Object> global, Local<Value> name,
+ v8::AccessType type, Local<Value> data) {
+ return security_check_value;
+}
+
+
+TEST(PrototypeGetterAccessCheck) {
+ i::FLAG_allow_natives_syntax = true;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+ auto fun_templ = v8::FunctionTemplate::New(isolate);
+ auto getter_templ = v8::FunctionTemplate::New(isolate, handle_property);
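+ // handle_property is a getter defined earlier in this file; the checks below
+ // assume it returns 907.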
+ getter_templ->SetAcceptAnyReceiver(false);
+ fun_templ->InstanceTemplate()->SetAccessorProperty(v8_str("foo"),
+ getter_templ);
+ auto obj_templ = v8::ObjectTemplate::New(isolate);
+ obj_templ->SetAccessCheckCallbacks(SecurityTestCallback, nullptr);
+ env->Global()->Set(v8_str("Fun"), fun_templ->GetFunction());
+ env->Global()->Set(v8_str("obj"), obj_templ->NewInstance());
+ env->Global()->Set(v8_str("obj2"), obj_templ->NewInstance());
+
+ security_check_value = true;
+ CompileRun("var proto = new Fun();");
+ CompileRun("obj.__proto__ = proto;");
+ ExpectInt32("proto.foo", 907);
+
+ // Test direct.
+ security_check_value = true;
+ ExpectInt32("obj.foo", 907);
+ security_check_value = false;
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("obj.foo");
+ CHECK(try_catch.HasCaught());
+ }
+
+ // Test through call.
+ security_check_value = true;
+ ExpectInt32("proto.__lookupGetter__('foo').call(obj)", 907);
+ security_check_value = false;
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("proto.__lookupGetter__('foo').call(obj)");
+ CHECK(try_catch.HasCaught());
+ }
+
+ // Test ics.
+ CompileRun(
+ "function f() {"
+ " var x;"
+ " for (var i = 0; i < 4; i++) {"
+ " x = obj.foo;"
+ " }"
+ " return x;"
+ "}");
+
+ security_check_value = true;
+ ExpectInt32("f()", 907);
+ security_check_value = false;
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("f();");
+ CHECK(try_catch.HasCaught());
+ }
+
+ // Test crankshaft.
+ CompileRun("%OptimizeFunctionOnNextCall(f);");
+
+ security_check_value = true;
+ ExpectInt32("f()", 907);
+ security_check_value = false;
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("f();");
+ CHECK(try_catch.HasCaught());
+ }
+}
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index 79ba4a486e..0ec9934644 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -63,7 +63,7 @@ static AllocationResult AllocateAfterFailures() {
heap->AllocateFixedArray(10000, TENURED).ToObjectChecked();
// Large object space.
- static const int kLargeObjectSpaceFillerLength = 300000;
+ static const int kLargeObjectSpaceFillerLength = 3 * (Page::kPageSize / 10);
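+ // Derive the filler length from the page size so the array always exceeds a
+ // regular page's area (see the DCHECK below).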
static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
kLargeObjectSpaceFillerLength);
DCHECK(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
@@ -210,7 +210,7 @@ TEST(CodeRange) {
// Geometrically distributed sizes, greater than
// Page::kMaxRegularHeapObjectSize (which is greater than code page area).
// TODO(gc): instead of using 3, use some constant based on code_range_size
- // kMaxHeapObjectSize.
+ // kMaxRegularHeapObjectSize.
size_t requested =
(Page::kMaxRegularHeapObjectSize << (Pseudorandom() % 3)) +
Pseudorandom() % 5000 + 1;
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index a2acb24d76..e3bee15318 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -15,7 +15,6 @@
#include "src/objects.h"
#include "src/parser.h"
#include "src/smart-pointers.h"
-#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
@@ -2934,16 +2933,8 @@ struct AccessCheckData {
};
-bool SimpleNamedAccessChecker(Local<v8::Object> global, Local<Value> name,
- v8::AccessType type, Local<Value> data) {
- auto access_check_data = GetWrappedObject<AccessCheckData>(data);
- access_check_data->count++;
- return access_check_data->result;
-}
-
-
-bool SimpleIndexedAccessChecker(Local<v8::Object> global, uint32_t index,
- v8::AccessType type, Local<Value> data) {
+bool SimpleAccessChecker(Local<v8::Object> global, Local<Value> name,
+ v8::AccessType type, Local<Value> data) {
auto access_check_data = GetWrappedObject<AccessCheckData>(data);
access_check_data->count++;
return access_check_data->result;
@@ -3015,7 +3006,7 @@ THREADED_TEST(NamedAllCanReadInterceptor) {
auto checked = v8::ObjectTemplate::New(isolate);
checked->SetAccessCheckCallbacks(
- SimpleNamedAccessChecker, nullptr,
+ SimpleAccessChecker, nullptr,
BuildWrappedObject<AccessCheckData>(isolate, &access_check_data), false);
context->Global()->Set(v8_str("intercepted_0"), intercepted_0->NewInstance());
@@ -3032,15 +3023,27 @@ THREADED_TEST(NamedAllCanReadInterceptor) {
access_check_data.result = true;
ExpectInt32("checked.whatever", 17);
- CHECK_EQ(1, access_check_data.count);
+ CHECK(!CompileRun("Object.getOwnPropertyDescriptor(checked, 'whatever')")
+ ->IsUndefined());
+ CHECK_EQ(2, access_check_data.count);
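+ // Both the property load and the descriptor lookup run the access check, so
+ // the count advances by two per step.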
access_check_data.result = false;
ExpectInt32("checked.whatever", intercept_data_0.value);
- CHECK_EQ(2, access_check_data.count);
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("Object.getOwnPropertyDescriptor(checked, 'whatever')");
+ CHECK(try_catch.HasCaught());
+ }
+ CHECK_EQ(4, access_check_data.count);
intercept_data_1.should_intercept = true;
ExpectInt32("checked.whatever", intercept_data_1.value);
- CHECK_EQ(3, access_check_data.count);
+ {
+ v8::TryCatch try_catch(isolate);
+ CompileRun("Object.getOwnPropertyDescriptor(checked, 'whatever')");
+ CHECK(try_catch.HasCaught());
+ }
+ CHECK_EQ(6, access_check_data.count);
}
@@ -3081,7 +3084,7 @@ THREADED_TEST(IndexedAllCanReadInterceptor) {
auto checked = v8::ObjectTemplate::New(isolate);
checked->SetAccessCheckCallbacks(
- nullptr, SimpleIndexedAccessChecker,
+ SimpleAccessChecker, nullptr,
BuildWrappedObject<AccessCheckData>(isolate, &access_check_data), false);
context->Global()->Set(v8_str("intercepted_0"), intercepted_0->NewInstance());
@@ -3098,13 +3101,211 @@ THREADED_TEST(IndexedAllCanReadInterceptor) {
access_check_data.result = true;
ExpectInt32("checked[15]", 17);
- CHECK_EQ(1, access_check_data.count);
+ CHECK(!CompileRun("Object.getOwnPropertyDescriptor(checked, '15')")
+ ->IsUndefined());
+ CHECK_EQ(3, access_check_data.count);
access_check_data.result = false;
ExpectInt32("checked[15]", intercept_data_0.value);
- CHECK_EQ(2, access_check_data.count);
+ // Note: this should throw, but without a LookupIterator it's complicated.
+ CHECK(!CompileRun("Object.getOwnPropertyDescriptor(checked, '15')")
+ ->IsUndefined());
+ CHECK_EQ(6, access_check_data.count);
intercept_data_1.should_intercept = true;
ExpectInt32("checked[15]", intercept_data_1.value);
- CHECK_EQ(3, access_check_data.count);
+ // Note: this should throw, but without a LookupIterator it's complicated.
+ CHECK(!CompileRun("Object.getOwnPropertyDescriptor(checked, '15')")
+ ->IsUndefined());
+ CHECK_EQ(9, access_check_data.count);
+}
+
+
+THREADED_TEST(NonMaskingInterceptorOwnProperty) {
+ auto isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext context;
+
+ ShouldInterceptData intercept_data;
+ intercept_data.value = 239;
+ intercept_data.should_intercept = true;
+
+ auto interceptor_templ = v8::ObjectTemplate::New(isolate);
+ v8::NamedPropertyHandlerConfiguration conf(ShouldNamedInterceptor);
+ conf.flags = v8::PropertyHandlerFlags::kNonMasking;
+ conf.data = BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data);
+ interceptor_templ->SetHandler(conf);
+
+ auto interceptor = interceptor_templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), interceptor);
+
+ ExpectInt32("obj.whatever", 239);
+
+ CompileRun("obj.whatever = 4;");
+ ExpectInt32("obj.whatever", 4);
+
+ CompileRun("delete obj.whatever;");
+ ExpectInt32("obj.whatever", 239);
+}
+
+
+THREADED_TEST(NonMaskingInterceptorPrototypeProperty) {
+ auto isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext context;
+
+ ShouldInterceptData intercept_data;
+ intercept_data.value = 239;
+ intercept_data.should_intercept = true;
+
+ auto interceptor_templ = v8::ObjectTemplate::New(isolate);
+ v8::NamedPropertyHandlerConfiguration conf(ShouldNamedInterceptor);
+ conf.flags = v8::PropertyHandlerFlags::kNonMasking;
+ conf.data = BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data);
+ interceptor_templ->SetHandler(conf);
+
+ auto interceptor = interceptor_templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), interceptor);
+
+ ExpectInt32("obj.whatever", 239);
+
+ CompileRun("obj.__proto__ = {'whatever': 4};");
+ ExpectInt32("obj.whatever", 4);
+
+ CompileRun("delete obj.__proto__.whatever;");
+ ExpectInt32("obj.whatever", 239);
+}
+
+
+THREADED_TEST(NonMaskingInterceptorPrototypePropertyIC) {
+ auto isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext context;
+
+ ShouldInterceptData intercept_data;
+ intercept_data.value = 239;
+ intercept_data.should_intercept = true;
+
+ auto interceptor_templ = v8::ObjectTemplate::New(isolate);
+ v8::NamedPropertyHandlerConfiguration conf(ShouldNamedInterceptor);
+ conf.flags = v8::PropertyHandlerFlags::kNonMasking;
+ conf.data = BuildWrappedObject<ShouldInterceptData>(isolate, &intercept_data);
+ interceptor_templ->SetHandler(conf);
+
+ auto interceptor = interceptor_templ->NewInstance();
+ context->Global()->Set(v8_str("obj"), interceptor);
+
+ CompileRun(
+ "outer = {};"
+ "outer.__proto__ = obj;"
+ "function f(obj) {"
+ " var x;"
+ " for (var i = 0; i < 4; i++) {"
+ " x = obj.whatever;"
+ " }"
+ " return x;"
+ "}");
+
+ // Receiver == holder.
+ CompileRun("obj.__proto__ = null;");
+ ExpectInt32("f(obj)", 239);
+ ExpectInt32("f(outer)", 239);
+
+ // Receiver != holder.
+ CompileRun("Object.setPrototypeOf(obj, {});");
+ ExpectInt32("f(obj)", 239);
+ ExpectInt32("f(outer)", 239);
+
+ // Masked value on prototype.
+ CompileRun("obj.__proto__.whatever = 4;");
+ CompileRun("obj.__proto__.__proto__ = { 'whatever' : 5 };");
+ ExpectInt32("f(obj)", 4);
+ ExpectInt32("f(outer)", 4);
+
+ // Masked value on prototype prototype.
+ CompileRun("delete obj.__proto__.whatever;");
+ ExpectInt32("f(obj)", 5);
+ ExpectInt32("f(outer)", 5);
+
+ // Reset.
+ CompileRun("delete obj.__proto__.__proto__.whatever;");
+ ExpectInt32("f(obj)", 239);
+ ExpectInt32("f(outer)", 239);
+
+ // Masked value on self.
+ CompileRun("obj.whatever = 4;");
+ ExpectInt32("f(obj)", 4);
+ ExpectInt32("f(outer)", 4);
+
+ // Reset.
+ CompileRun("delete obj.whatever;");
+ ExpectInt32("f(obj)", 239);
+ ExpectInt32("f(outer)", 239);
+
+ CompileRun("outer.whatever = 4;");
+ ExpectInt32("f(obj)", 239);
+ ExpectInt32("f(outer)", 4);
+}
+
+
+namespace {
+
+void DatabaseGetter(Local<Name> name,
+ const v8::PropertyCallbackInfo<Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ auto context = info.GetIsolate()->GetCurrentContext();
+ Local<v8::Object> db = info.Holder()
+ ->GetRealNamedProperty(context, v8_str("db"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ if (!db->Has(context, name).FromJust()) return;
+ info.GetReturnValue().Set(db->Get(context, name).ToLocalChecked());
+}
+
+
+void DatabaseSetter(Local<Name> name, Local<Value> value,
+ const v8::PropertyCallbackInfo<Value>& info) {
+ ApiTestFuzzer::Fuzz();
+ auto context = info.GetIsolate()->GetCurrentContext();
+ if (name->Equals(v8_str("db"))) return;
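+ // Never intercept the 'db' slot itself, so that assigning it falls through
+ // to a real own property.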
+ Local<v8::Object> db = info.Holder()
+ ->GetRealNamedProperty(context, v8_str("db"))
+ .ToLocalChecked()
+ .As<v8::Object>();
+ db->Set(context, name, value).FromJust();
+ info.GetReturnValue().Set(value);
+}
+} // namespace
+
+
+THREADED_TEST(NonMaskingInterceptorGlobalEvalRegression) {
+ auto isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ LocalContext context;
+
+ auto interceptor_templ = v8::ObjectTemplate::New(isolate);
+ v8::NamedPropertyHandlerConfiguration conf(DatabaseGetter, DatabaseSetter);
+ conf.flags = v8::PropertyHandlerFlags::kNonMasking;
+ interceptor_templ->SetHandler(conf);
+
+ context->Global()->Set(v8_str("intercepted_1"),
+ interceptor_templ->NewInstance());
+ context->Global()->Set(v8_str("intercepted_2"),
+ interceptor_templ->NewInstance());
+
+ // Init dbs.
+ CompileRun(
+ "intercepted_1.db = {};"
+ "intercepted_2.db = {};");
+
+ ExpectInt32(
+ "var obj = intercepted_1;"
+ "obj.x = 4;"
+ "eval('obj.x');"
+ "eval('obj.x');"
+ "eval('obj.x');"
+ "obj = intercepted_2;"
+ "obj.x = 9;"
+ "eval('obj.x');",
+ 9);
}
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index afb70ffeee..8432cbfba1 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -45,7 +45,6 @@
#include "src/objects.h"
#include "src/parser.h"
#include "src/smart-pointers.h"
-#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "src/vm-state.h"
@@ -61,12 +60,15 @@ using ::v8::FunctionTemplate;
using ::v8::Handle;
using ::v8::HandleScope;
using ::v8::Local;
-using ::v8::Name;
+using ::v8::Maybe;
using ::v8::Message;
using ::v8::MessageCallback;
+using ::v8::Name;
+using ::v8::None;
using ::v8::Object;
using ::v8::ObjectTemplate;
using ::v8::Persistent;
+using ::v8::PropertyAttribute;
using ::v8::Script;
using ::v8::StackTrace;
using ::v8::String;
@@ -691,28 +693,23 @@ class RandomLengthOneByteResource
THREADED_TEST(NewExternalForVeryLongString) {
+ auto isolate = CcTest::isolate();
{
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(isolate);
v8::TryCatch try_catch;
RandomLengthOneByteResource r(1 << 30);
- v8::Local<v8::String> str = v8::String::NewExternal(CcTest::isolate(), &r);
+ v8::Local<v8::String> str = v8::String::NewExternal(isolate, &r);
CHECK(str.IsEmpty());
- CHECK(try_catch.HasCaught());
- String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(0, strcmp("RangeError: Invalid string length", *exception_value));
+ CHECK(!try_catch.HasCaught());
}
{
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(isolate);
v8::TryCatch try_catch;
RandomLengthResource r(1 << 30);
- v8::Local<v8::String> str = v8::String::NewExternal(CcTest::isolate(), &r);
+ v8::Local<v8::String> str = v8::String::NewExternal(isolate, &r);
CHECK(str.IsEmpty());
- CHECK(try_catch.HasCaught());
- String::Utf8Value exception_value(try_catch.Exception());
- CHECK_EQ(0, strcmp("RangeError: Invalid string length", *exception_value));
+ CHECK(!try_catch.HasCaught());
}
}
@@ -733,8 +730,8 @@ THREADED_TEST(ScavengeExternalString) {
CHECK(in_new_space || CcTest::heap()->old_data_space()->Contains(*istring));
CHECK_EQ(0, dispose_count);
}
- CcTest::heap()->CollectGarbage(
- in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
+ CcTest::heap()->CollectGarbage(in_new_space ? i::NEW_SPACE
+ : i::OLD_DATA_SPACE);
CHECK_EQ(1, dispose_count);
}
@@ -756,8 +753,8 @@ THREADED_TEST(ScavengeExternalOneByteString) {
CHECK(in_new_space || CcTest::heap()->old_data_space()->Contains(*istring));
CHECK_EQ(0, dispose_count);
}
- CcTest::heap()->CollectGarbage(
- in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
+ CcTest::heap()->CollectGarbage(in_new_space ? i::NEW_SPACE
+ : i::OLD_DATA_SPACE);
CHECK_EQ(1, dispose_count);
}
@@ -2124,7 +2121,7 @@ THREADED_TEST(InternalFieldsAlignedPointers) {
void* huge = reinterpret_cast<void*>(~static_cast<uintptr_t>(1));
CheckAlignedPointerInInternalField(obj, huge);
- v8::UniquePersistent<v8::Object> persistent(isolate, obj);
+ v8::Global<v8::Object> persistent(isolate, obj);
CHECK_EQ(1, Object::InternalFieldCount(persistent));
CHECK_EQ(huge, Object::GetAlignedPointerFromInternalField(persistent, 0));
}
@@ -3035,20 +3032,20 @@ THREADED_TEST(ResettingGlobalHandleToEmpty) {
template <class T>
-static v8::UniquePersistent<T> PassUnique(v8::UniquePersistent<T> unique) {
+static v8::Global<T> PassUnique(v8::Global<T> unique) {
return unique.Pass();
}
template <class T>
-static v8::UniquePersistent<T> ReturnUnique(v8::Isolate* isolate,
- const v8::Persistent<T>& global) {
- v8::UniquePersistent<String> unique(isolate, global);
+static v8::Global<T> ReturnUnique(v8::Isolate* isolate,
+ const v8::Persistent<T>& global) {
+ v8::Global<String> unique(isolate, global);
return unique.Pass();
}
-THREADED_TEST(UniquePersistent) {
+THREADED_TEST(Global) {
v8::Isolate* isolate = CcTest::isolate();
v8::Persistent<String> global;
{
@@ -3059,11 +3056,11 @@ THREADED_TEST(UniquePersistent) {
reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
int initial_handle_count = global_handles->global_handles_count();
{
- v8::UniquePersistent<String> unique(isolate, global);
+ v8::Global<String> unique(isolate, global);
CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
// Test assignment via Pass
{
- v8::UniquePersistent<String> copy = unique.Pass();
+ v8::Global<String> copy = unique.Pass();
CHECK(unique.IsEmpty());
CHECK(copy == global);
CHECK_EQ(initial_handle_count + 1,
@@ -3072,7 +3069,7 @@ THREADED_TEST(UniquePersistent) {
}
// Test ctor via Pass
{
- v8::UniquePersistent<String> copy(unique.Pass());
+ v8::Global<String> copy(unique.Pass());
CHECK(unique.IsEmpty());
CHECK(copy == global);
CHECK_EQ(initial_handle_count + 1,
@@ -3081,7 +3078,7 @@ THREADED_TEST(UniquePersistent) {
}
// Test pass through function call
{
- v8::UniquePersistent<String> copy = PassUnique(unique.Pass());
+ v8::Global<String> copy = PassUnique(unique.Pass());
CHECK(unique.IsEmpty());
CHECK(copy == global);
CHECK_EQ(initial_handle_count + 1,
@@ -3092,7 +3089,7 @@ THREADED_TEST(UniquePersistent) {
}
// Test pass from function call
{
- v8::UniquePersistent<String> unique = ReturnUnique(isolate, global);
+ v8::Global<String> unique = ReturnUnique(isolate, global);
CHECK(unique == global);
CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
}
@@ -3101,6 +3098,123 @@ THREADED_TEST(UniquePersistent) {
}
+namespace {
+
+class TwoPassCallbackData;
+void FirstPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data);
+void SecondPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data);
+
+
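+// Two-pass weak callbacks: the first pass runs during GC and may only reset
+// handles; allocation and even nested GCs are deferred to the second pass.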
+class TwoPassCallbackData {
+ public:
+ TwoPassCallbackData(v8::Isolate* isolate, int* instance_counter)
+ : first_pass_called_(false),
+ second_pass_called_(false),
+ trigger_gc_(false),
+ instance_counter_(instance_counter) {
+ HandleScope scope(isolate);
+ i::ScopedVector<char> buffer(40);
+ i::SNPrintF(buffer, "%p", static_cast<void*>(this));
+ auto string =
+ v8::String::NewFromUtf8(isolate, buffer.start(),
+ v8::NewStringType::kNormal).ToLocalChecked();
+ cell_.Reset(isolate, string);
+ (*instance_counter_)++;
+ }
+
+ ~TwoPassCallbackData() {
+ CHECK(first_pass_called_);
+ CHECK(second_pass_called_);
+ CHECK(cell_.IsEmpty());
+ (*instance_counter_)--;
+ }
+
+ void FirstPass() {
+ CHECK(!first_pass_called_);
+ CHECK(!second_pass_called_);
+ CHECK(!cell_.IsEmpty());
+ cell_.Reset();
+ first_pass_called_ = true;
+ }
+
+ void SecondPass() {
+ CHECK(first_pass_called_);
+ CHECK(!second_pass_called_);
+ CHECK(cell_.IsEmpty());
+ second_pass_called_ = true;
+ delete this;
+ }
+
+ void SetWeak() {
+ cell_.SetWeak(this, FirstPassCallback, v8::WeakCallbackType::kParameter);
+ }
+
+ void MarkTriggerGc() { trigger_gc_ = true; }
+ bool trigger_gc() { return trigger_gc_; }
+
+ int* instance_counter() { return instance_counter_; }
+
+ private:
+ bool first_pass_called_;
+ bool second_pass_called_;
+ bool trigger_gc_;
+ v8::Global<v8::String> cell_;
+ int* instance_counter_;
+};
+
+
+void SecondPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data) {
+ ApiTestFuzzer::Fuzz();
+ bool trigger_gc = data.GetParameter()->trigger_gc();
+ int* instance_counter = data.GetParameter()->instance_counter();
+ data.GetParameter()->SecondPass();
+ if (!trigger_gc) return;
+ auto data_2 = new TwoPassCallbackData(data.GetIsolate(), instance_counter);
+ data_2->SetWeak();
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+}
+
+
+void FirstPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data) {
+ data.GetParameter()->FirstPass();
+ data.SetSecondPassCallback(SecondPassCallback);
+}
+
+} // namespace
+
+
+TEST(TwoPassPhantomCallbacks) {
+ auto isolate = CcTest::isolate();
+ const size_t kLength = 20;
+ int instance_counter = 0;
+ for (size_t i = 0; i < kLength; ++i) {
+ auto data = new TwoPassCallbackData(isolate, &instance_counter);
+ data->SetWeak();
+ }
+ CHECK_EQ(static_cast<int>(kLength), instance_counter);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CHECK_EQ(0, instance_counter);
+}
+
+
+TEST(TwoPassPhantomCallbacksNestedGc) {
+ auto isolate = CcTest::isolate();
+ const size_t kLength = 20;
+ TwoPassCallbackData* array[kLength];
+ int instance_counter = 0;
+ for (size_t i = 0; i < kLength; ++i) {
+ array[i] = new TwoPassCallbackData(isolate, &instance_counter);
+ array[i]->SetWeak();
+ }
+ array[5]->MarkTriggerGc();
+ array[10]->MarkTriggerGc();
+ array[15]->MarkTriggerGc();
+ CHECK_EQ(static_cast<int>(kLength), instance_counter);
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+ CHECK_EQ(0, instance_counter);
+}
+
+
template <typename K, typename V>
class WeakStdMapTraits : public v8::StdMapTraits<K, V> {
public:
@@ -3126,8 +3240,7 @@ class WeakStdMapTraits : public v8::StdMapTraits<K, V> {
return data.GetParameter()->key;
}
static void DisposeCallbackData(WeakCallbackDataType* data) { delete data; }
- static void Dispose(v8::Isolate* isolate, v8::UniquePersistent<V> value,
- K key) {}
+ static void Dispose(v8::Isolate* isolate, v8::Global<V> value, K key) {}
};
@@ -3153,7 +3266,7 @@ static void TestPersistentValueMap() {
typename Map::PersistentValueReference ref = map.GetReference(7);
CHECK(expected->Equals(ref.NewLocal(isolate)));
}
- v8::UniquePersistent<v8::Object> removed = map.Remove(7);
+ v8::Global<v8::Object> removed = map.Remove(7);
CHECK_EQ(0, static_cast<int>(map.Size()));
CHECK(expected == removed);
removed = map.Remove(7);
@@ -3165,8 +3278,7 @@ static void TestPersistentValueMap() {
{
typename Map::PersistentValueReference ref;
Local<v8::Object> expected2 = v8::Object::New(isolate);
- removed = map.Set(8, v8::UniquePersistent<v8::Object>(isolate, expected2),
- &ref);
+ removed = map.Set(8, v8::Global<v8::Object>(isolate, expected2), &ref);
CHECK_EQ(1, static_cast<int>(map.Size()));
CHECK(expected == removed);
CHECK(expected2->Equals(ref.NewLocal(isolate)));
@@ -3197,6 +3309,126 @@ TEST(PersistentValueMap) {
}
+namespace {
+
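+// Shift the key left one bit so the low bit is clear:
+// SetAlignedPointerInInternalField expects pointer-aligned (even) values.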
+void* IntKeyToVoidPointer(int key) { return reinterpret_cast<void*>(key << 1); }
+
+
+Local<v8::Object> NewObjectForIntKey(
+ v8::Isolate* isolate, const v8::Global<v8::ObjectTemplate>& templ,
+ int key) {
+ auto local = Local<v8::ObjectTemplate>::New(isolate, templ);
+ auto obj = local->NewInstance();
+ obj->SetAlignedPointerInInternalField(0, IntKeyToVoidPointer(key));
+ return obj;
+}
+
+
+template <typename K, typename V>
+class PhantomStdMapTraits : public v8::StdMapTraits<K, V> {
+ public:
+ typedef typename v8::GlobalValueMap<K, V, PhantomStdMapTraits<K, V>> MapType;
+ static const v8::PersistentContainerCallbackType kCallbackType =
+ v8::kWeakWithInternalFields;
+ struct WeakCallbackDataType {
+ MapType* map;
+ K key;
+ };
+ static WeakCallbackDataType* WeakCallbackParameter(MapType* map, const K& key,
+ Local<V> value) {
+ WeakCallbackDataType* data = new WeakCallbackDataType;
+ data->map = map;
+ data->key = key;
+ return data;
+ }
+ static MapType* MapFromWeakCallbackInfo(
+ const v8::WeakCallbackInfo<WeakCallbackDataType>& data) {
+ return data.GetParameter()->map;
+ }
+ static K KeyFromWeakCallbackInfo(
+ const v8::WeakCallbackInfo<WeakCallbackDataType>& data) {
+ return data.GetParameter()->key;
+ }
+ static void DisposeCallbackData(WeakCallbackDataType* data) { delete data; }
+ static void Dispose(v8::Isolate* isolate, v8::Global<V> value, K key) {
+ CHECK_EQ(IntKeyToVoidPointer(key),
+ v8::Object::GetAlignedPointerFromInternalField(value, 0));
+ }
+ static void DisposeWeak(
+ v8::Isolate* isolate,
+ const v8::WeakCallbackInfo<WeakCallbackDataType>& info, K key) {
+ CHECK_EQ(IntKeyToVoidPointer(key), info.GetInternalField(0));
+ DisposeCallbackData(info.GetParameter());
+ }
+};
+
+} // namespace
+
+
+TEST(GlobalValueMap) {
+ typedef v8::GlobalValueMap<int, v8::Object,
+ PhantomStdMapTraits<int, v8::Object>> Map;
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::Global<ObjectTemplate> templ;
+ {
+ HandleScope scope(isolate);
+ auto t = ObjectTemplate::New(isolate);
+ t->SetInternalFieldCount(1);
+ templ.Reset(isolate, t);
+ }
+ Map map(isolate);
+ v8::internal::GlobalHandles* global_handles =
+ reinterpret_cast<v8::internal::Isolate*>(isolate)->global_handles();
+ int initial_handle_count = global_handles->global_handles_count();
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ {
+ HandleScope scope(isolate);
+ Local<v8::Object> obj = map.Get(7);
+ CHECK(obj.IsEmpty());
+ Local<v8::Object> expected = v8::Object::New(isolate);
+ map.Set(7, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ obj = map.Get(7);
+ CHECK(expected->Equals(obj));
+ {
+ Map::PersistentValueReference ref = map.GetReference(7);
+ CHECK(expected->Equals(ref.NewLocal(isolate)));
+ }
+ v8::Global<v8::Object> removed = map.Remove(7);
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ CHECK(expected == removed);
+ removed = map.Remove(7);
+ CHECK(removed.IsEmpty());
+ map.Set(8, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ map.Set(8, expected);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ {
+ Map::PersistentValueReference ref;
+ Local<v8::Object> expected2 = NewObjectForIntKey(isolate, templ, 8);
+ removed = map.Set(8, v8::Global<v8::Object>(isolate, expected2), &ref);
+ CHECK_EQ(1, static_cast<int>(map.Size()));
+ CHECK(expected == removed);
+ CHECK(expected2->Equals(ref.NewLocal(isolate)));
+ }
+ }
+ CHECK_EQ(initial_handle_count + 1, global_handles->global_handles_count());
+ CcTest::i_isolate()->heap()->CollectAllGarbage(
+ i::Heap::kAbortIncrementalMarkingMask);
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ CHECK_EQ(initial_handle_count, global_handles->global_handles_count());
+ {
+ HandleScope scope(isolate);
+ Local<v8::Object> value = NewObjectForIntKey(isolate, templ, 9);
+ map.Set(9, value);
+ map.Clear();
+ }
+ CHECK_EQ(0, static_cast<int>(map.Size()));
+ CHECK_EQ(initial_handle_count, global_handles->global_handles_count());
+}
+
+
TEST(PersistentValueVector) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
@@ -3209,7 +3441,7 @@ TEST(PersistentValueVector) {
Local<v8::Object> obj1 = v8::Object::New(isolate);
Local<v8::Object> obj2 = v8::Object::New(isolate);
- v8::UniquePersistent<v8::Object> obj3(isolate, v8::Object::New(isolate));
+ v8::Global<v8::Object> obj3(isolate, v8::Object::New(isolate));
CHECK(vector.IsEmpty());
CHECK_EQ(0, static_cast<int>(vector.Size()));
@@ -3891,6 +4123,7 @@ static void check_message_3(v8::Handle<v8::Message> message,
CHECK(message->GetScriptOrigin().ResourceIsSharedCrossOrigin()->Value());
CHECK(message->GetScriptOrigin().ResourceIsEmbedderDebugScript()->Value());
CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
+ CHECK_EQ(7.40, message->GetScriptOrigin().SourceMapUrl()->NumberValue());
message_received = true;
}
@@ -3902,10 +4135,10 @@ TEST(MessageHandler3) {
CHECK(!message_received);
v8::V8::AddMessageListener(check_message_3);
LocalContext context;
- v8::ScriptOrigin origin =
- v8::ScriptOrigin(v8_str("6.75"), v8::Integer::New(isolate, 1),
- v8::Integer::New(isolate, 2), v8::True(isolate),
- Handle<v8::Integer>(), v8::True(isolate));
+ v8::ScriptOrigin origin = v8::ScriptOrigin(
+ v8_str("6.75"), v8::Integer::New(isolate, 1),
+ v8::Integer::New(isolate, 2), v8::True(isolate), Handle<v8::Integer>(),
+ v8::True(isolate), v8_str("7.40"));
v8::Handle<v8::Script> script =
Script::Compile(v8_str("throw 'error'"), &origin);
script->Run();
@@ -6266,12 +6499,13 @@ THREADED_TEST(ErrorWithMissingScriptInfo) {
struct FlagAndPersistent {
bool flag;
- v8::Persistent<v8::Object> handle;
+ v8::Global<v8::Object> handle;
};
-static void SetFlag(const v8::PhantomCallbackData<FlagAndPersistent>& data) {
+static void SetFlag(const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
data.GetParameter()->flag = true;
+ data.GetParameter()->handle.Reset();
}
@@ -6309,8 +6543,10 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
object_a.flag = false;
object_b.flag = false;
- object_a.handle.SetPhantom(&object_a, &SetFlag);
- object_b.handle.SetPhantom(&object_b, &SetFlag);
+ object_a.handle.SetWeak(&object_a, &SetFlag,
+ v8::WeakCallbackType::kParameter);
+ object_b.handle.SetWeak(&object_b, &SetFlag,
+ v8::WeakCallbackType::kParameter);
CHECK(!object_b.handle.IsIndependent());
object_a.handle.MarkIndependent();
object_b.handle.MarkIndependent();
@@ -6365,7 +6601,7 @@ class Trivial2 {
void CheckInternalFields(
- const v8::PhantomCallbackData<v8::Persistent<v8::Object>>& data) {
+ const v8::WeakCallbackInfo<v8::Persistent<v8::Object>>& data) {
v8::Persistent<v8::Object>* handle = data.GetParameter();
handle->Reset();
Trivial* t1 = reinterpret_cast<Trivial*>(data.GetInternalField1());
@@ -6405,8 +6641,8 @@ void InternalFieldCallback(bool global_gc) {
reinterpret_cast<Trivial2*>(obj->GetAlignedPointerFromInternalField(1));
CHECK_EQ(103, t2->x());
- handle.SetPhantom<v8::Persistent<v8::Object>>(&handle, CheckInternalFields,
- 0, 1);
+ handle.SetWeak<v8::Persistent<v8::Object>>(
+ &handle, CheckInternalFields, v8::WeakCallbackType::kInternalFields);
if (!global_gc) {
handle.MarkIndependent();
}
@@ -7673,30 +7909,9 @@ TEST(TryCatchFinallyStoresMessageUsingTryCatchHandler) {
// For use within the TestSecurityHandler() test.
static bool g_security_callback_result = false;
-static bool NamedSecurityTestCallback(Local<v8::Object> global,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
+static bool SecurityTestCallback(Local<v8::Object> global, Local<Value> name,
+ v8::AccessType type, Local<Value> data) {
printf("a\n");
- // Always allow read access.
- if (type == v8::ACCESS_GET)
- return true;
-
- // Sometimes allow other access.
- return g_security_callback_result;
-}
-
-
-static bool IndexedSecurityTestCallback(Local<v8::Object> global,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
- printf("b\n");
- // Always allow read access.
- if (type == v8::ACCESS_GET)
- return true;
-
- // Sometimes allow other access.
return g_security_callback_result;
}
@@ -7707,8 +7922,7 @@ TEST(SecurityHandler) {
v8::HandleScope scope0(isolate);
v8::Handle<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedSecurityTestCallback,
- IndexedSecurityTestCallback);
+ global_template->SetAccessCheckCallbacks(SecurityTestCallback, NULL);
// Create an environment
v8::Handle<Context> context0 = Context::New(isolate, NULL, global_template);
context0->Enter();
@@ -7735,6 +7949,7 @@ TEST(SecurityHandler) {
v8::Handle<Script> script1 =
v8_compile("othercontext.foo = 222; othercontext[0] = 888;");
script1->Run();
+ g_security_callback_result = true;
// This read will pass the security check.
v8::Handle<Value> foo1 = global0->Get(v8_str("foo"));
CHECK_EQ(111, foo1->Int32Value());
@@ -7743,7 +7958,7 @@ TEST(SecurityHandler) {
CHECK_EQ(999, z1->Int32Value());
// Create another environment, should pass security checks.
- { g_security_callback_result = true; // allow security handler to pass.
+ {
v8::HandleScope scope2(isolate);
LocalContext context2;
v8::Handle<v8::Object> global2 = context2->Global();
@@ -7866,26 +8081,13 @@ THREADED_TEST(SecurityChecksForPrototypeChain) {
}
-static bool named_security_check_with_gc_called;
+static bool security_check_with_gc_called;
-static bool NamedSecurityCallbackWithGC(Local<v8::Object> global,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
+static bool SecurityTestCallbackWithGC(Local<v8::Object> global,
+ Local<v8::Value> name,
+ v8::AccessType type, Local<Value> data) {
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- named_security_check_with_gc_called = true;
- return true;
-}
-
-
-static bool indexed_security_check_with_gc_called;
-
-static bool IndexedSecurityTestCallbackWithGC(Local<v8::Object> global,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
- CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- indexed_security_check_with_gc_called = true;
+ security_check_with_gc_called = true;
return true;
}
@@ -7895,29 +8097,20 @@ TEST(SecurityTestGCAllowed) {
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallbacks(NamedSecurityCallbackWithGC,
- IndexedSecurityTestCallbackWithGC);
+ object_template->SetAccessCheckCallbacks(SecurityTestCallbackWithGC, NULL);
v8::Handle<Context> context = Context::New(isolate);
v8::Context::Scope context_scope(context);
context->Global()->Set(v8_str("obj"), object_template->NewInstance());
- named_security_check_with_gc_called = false;
- CompileRun("obj.foo = new String(1001);");
- CHECK(named_security_check_with_gc_called);
-
- indexed_security_check_with_gc_called = false;
+ security_check_with_gc_called = false;
CompileRun("obj[0] = new String(1002);");
- CHECK(indexed_security_check_with_gc_called);
-
- named_security_check_with_gc_called = false;
- CHECK(CompileRun("obj.foo")->ToString(isolate)->Equals(v8_str("1001")));
- CHECK(named_security_check_with_gc_called);
+ CHECK(security_check_with_gc_called);
- indexed_security_check_with_gc_called = false;
+ security_check_with_gc_called = false;
CHECK(CompileRun("obj[0]")->ToString(isolate)->Equals(v8_str("1002")));
- CHECK(indexed_security_check_with_gc_called);
+ CHECK(security_check_with_gc_called);
}
@@ -8275,22 +8468,11 @@ TEST(DetachedAccesses) {
}
-static bool allowed_access_type[v8::ACCESS_KEYS + 1] = { false };
-static bool NamedAccessBlocker(Local<v8::Object> global,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
+static bool allowed_access = false;
+static bool AccessBlocker(Local<v8::Object> global, Local<Value> name,
+ v8::AccessType type, Local<Value> data) {
return CcTest::isolate()->GetCurrentContext()->Global()->Equals(global) ||
- allowed_access_type[type];
-}
-
-
-static bool IndexedAccessBlocker(Local<v8::Object> global,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
- return CcTest::isolate()->GetCurrentContext()->Global()->Equals(global) ||
- allowed_access_type[type];
+ allowed_access;
}
@@ -8338,8 +8520,7 @@ TEST(AccessControl) {
v8::Handle<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedAccessBlocker,
- IndexedAccessBlocker);
+ global_template->SetAccessCheckCallbacks(AccessBlocker, NULL);
// Add an accessor accessible by cross-domain JS code.
global_template->SetAccessor(
@@ -8413,15 +8594,10 @@ TEST(AccessControl) {
CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, '239')").IsEmpty());
CHECK(CompileRun("propertyIsEnumerable.call(other, '239')").IsEmpty());
- // Enable ACCESS_HAS
- allowed_access_type[v8::ACCESS_HAS] = true;
- CHECK(CompileRun("other[239]").IsEmpty());
- // ... and now we can get the descriptor...
- CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, '239').value")
- .IsEmpty());
- // ... and enumerate the property.
+ allowed_access = true;
+ // Now we can enumerate the property.
ExpectTrue("propertyIsEnumerable.call(other, '239')");
- allowed_access_type[v8::ACCESS_HAS] = false;
+ allowed_access = false;
// Access a property with JS accessor.
CHECK(CompileRun("other.js_accessor_p = 2").IsEmpty());
@@ -8430,9 +8606,7 @@ TEST(AccessControl) {
CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, 'js_accessor_p')")
.IsEmpty());
- // Enable both ACCESS_HAS and ACCESS_GET.
- allowed_access_type[v8::ACCESS_HAS] = true;
- allowed_access_type[v8::ACCESS_GET] = true;
+ allowed_access = true;
ExpectString("other.js_accessor_p", "getter");
ExpectObject(
@@ -8442,8 +8616,7 @@ TEST(AccessControl) {
ExpectUndefined(
"Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
- allowed_access_type[v8::ACCESS_HAS] = false;
- allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access = false;
// Access an element with JS accessor.
CHECK(CompileRun("other[42] = 2").IsEmpty());
@@ -8451,17 +8624,14 @@ TEST(AccessControl) {
CHECK(CompileRun("other[42]").IsEmpty());
CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, '42')").IsEmpty());
- // Enable both ACCESS_HAS and ACCESS_GET.
- allowed_access_type[v8::ACCESS_HAS] = true;
- allowed_access_type[v8::ACCESS_GET] = true;
+ allowed_access = true;
ExpectString("other[42]", "el_getter");
ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
- allowed_access_type[v8::ACCESS_HAS] = false;
- allowed_access_type[v8::ACCESS_GET] = false;
+ allowed_access = false;
v8::Handle<Value> value;
@@ -8514,8 +8684,7 @@ TEST(AccessControlES5) {
v8::Handle<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedAccessBlocker,
- IndexedAccessBlocker);
+ global_template->SetAccessCheckCallbacks(AccessBlocker, NULL);
// Add accessible accessor.
global_template->SetAccessor(
@@ -8578,14 +8747,9 @@ TEST(AccessControlES5) {
}
-static bool BlockEverythingNamed(Local<v8::Object> object, Local<Value> name,
- v8::AccessType type, Local<Value> data) {
- return false;
-}
-
-
-static bool BlockEverythingIndexed(Local<v8::Object> object, uint32_t key,
- v8::AccessType type, Local<Value> data) {
+static bool AccessAlwaysBlocked(Local<v8::Object> global, Local<Value> name,
+ v8::AccessType type, Local<Value> data) {
+ i::PrintF("Access blocked.\n");
return false;
}
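
AccessAlwaysBlocked is the shape this whole patch converges on: the separate
named and indexed security callbacks are folded into a single callback, and the
tests now pass NULL for the retired indexed slot. A hedged sketch of installing
such a check on a template (BlockCrossContext and InstallCheck are illustrative
names):

  static bool BlockCrossContext(v8::Local<v8::Object> accessed,
                                v8::Local<v8::Value> key, v8::AccessType type,
                                v8::Local<v8::Value> data) {
    // One callback now serves both cases; an indexed access arrives with
    // |key| holding the numeric index as a Value rather than a uint32_t.
    return false;  // deny every cross-context access
  }

  static void InstallCheck(v8::Isolate* isolate,
                           v8::Handle<v8::ObjectTemplate> templ) {
    // The retired indexed slot is passed as NULL, as throughout this patch.
    templ->SetAccessCheckCallbacks(BlockCrossContext, NULL);
  }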
@@ -8597,8 +8761,7 @@ THREADED_TEST(AccessControlGetOwnPropertyNames) {
v8::ObjectTemplate::New(isolate);
obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
- obj_template->SetAccessCheckCallbacks(BlockEverythingNamed,
- BlockEverythingIndexed);
+ obj_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
// Create an environment
v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
@@ -8641,8 +8804,7 @@ TEST(SuperAccessControl) {
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> obj_template =
v8::ObjectTemplate::New(isolate);
- obj_template->SetAccessCheckCallbacks(BlockEverythingNamed,
- BlockEverythingIndexed);
+ obj_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
LocalContext env;
env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
@@ -8690,6 +8852,32 @@ TEST(SuperAccessControl) {
}
+TEST(Regress470113) {
+ i::FLAG_harmony_classes = true;
+ i::FLAG_harmony_object_literals = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope handle_scope(isolate);
+ v8::Handle<v8::ObjectTemplate> obj_template =
+ v8::ObjectTemplate::New(isolate);
+ obj_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
+ LocalContext env;
+ env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
+
+ {
+ v8::TryCatch try_catch;
+ CompileRun(
+ "'use strict';\n"
+ "class C extends Object {\n"
+ " m() { super.powned = 'Powned!'; }\n"
+ "}\n"
+ "let c = new C();\n"
+ "c.m.call(prohibited)");
+
+ CHECK(try_catch.HasCaught());
+ }
+}
+
+
static void ConstTenGetter(Local<String> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(v8_num(10));
@@ -8749,31 +8937,18 @@ THREADED_TEST(CrossDomainAccessors) {
}
-static int named_access_count = 0;
-static int indexed_access_count = 0;
+static int access_count = 0;
-static bool NamedAccessCounter(Local<v8::Object> global,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
- named_access_count++;
- return true;
-}
-
-
-static bool IndexedAccessCounter(Local<v8::Object> global,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
- indexed_access_count++;
+static bool AccessCounter(Local<v8::Object> global, Local<Value> name,
+ v8::AccessType type, Local<Value> data) {
+ access_count++;
return true;
}
// This one is too easily disturbed by other tests.
TEST(AccessControlIC) {
- named_access_count = 0;
- indexed_access_count = 0;
+ access_count = 0;
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -8786,8 +8961,7 @@ TEST(AccessControlIC) {
// called for cross-domain access.
v8::Handle<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallbacks(NamedAccessCounter,
- IndexedAccessCounter);
+ object_template->SetAccessCheckCallbacks(AccessCounter, NULL);
Local<v8::Object> object = object_template->NewInstance();
v8::HandleScope scope1(isolate);
@@ -8811,7 +8985,7 @@ TEST(AccessControlIC) {
value = CompileRun("testProp(obj)");
CHECK(value->IsNumber());
CHECK_EQ(1, value->Int32Value());
- CHECK_EQ(21, named_access_count);
+ CHECK_EQ(21, access_count);
// Check that the named access-control function is called every time.
CompileRun("var p = 'prop';"
@@ -8825,16 +8999,18 @@ TEST(AccessControlIC) {
value = CompileRun("testKeyed(obj)");
CHECK(value->IsNumber());
CHECK_EQ(1, value->Int32Value());
- CHECK_EQ(42, named_access_count);
+ CHECK_EQ(42, access_count);
// Force the inline caches into generic state and try again.
CompileRun("testKeyed({ a: 0 })");
CompileRun("testKeyed({ b: 0 })");
value = CompileRun("testKeyed(obj)");
CHECK(value->IsNumber());
CHECK_EQ(1, value->Int32Value());
- CHECK_EQ(63, named_access_count);
+ CHECK_EQ(63, access_count);
// Check that the indexed access-control function is called every time.
+ access_count = 0;
+
CompileRun("function testIndexed(obj) {"
" for (var i = 0; i < 10; i++) obj[0] = 1;"
" for (var j = 0; j < 10; j++) obj[0];"
@@ -8843,15 +9019,16 @@ TEST(AccessControlIC) {
value = CompileRun("testIndexed(obj)");
CHECK(value->IsNumber());
CHECK_EQ(1, value->Int32Value());
- CHECK_EQ(21, indexed_access_count);
+ CHECK_EQ(21, access_count);
// Force the inline caches into generic state.
CompileRun("testIndexed(new Array(1))");
// Test that the indexed access check is called.
value = CompileRun("testIndexed(obj)");
CHECK(value->IsNumber());
CHECK_EQ(1, value->Int32Value());
- CHECK_EQ(42, indexed_access_count);
+ CHECK_EQ(42, access_count);
+ access_count = 0;
// Check that the named access check is called when invoking
// functions on an object that requires access checks.
CompileRun("obj.f = function() {}");
@@ -8859,7 +9036,8 @@ TEST(AccessControlIC) {
" for (var i = 0; i < 10; i++) obj.f();"
"}");
CompileRun("testCallNormal(obj)");
- CHECK_EQ(74, named_access_count);
+ printf("%i\n", access_count);
+ CHECK_EQ(11, access_count);
// Force obj into slow case.
value = CompileRun("delete obj.prop");
@@ -8870,89 +9048,14 @@ TEST(AccessControlIC) {
value = CompileRun("testProp(obj);");
CHECK(value->IsNumber());
CHECK_EQ(1, value->Int32Value());
- CHECK_EQ(96, named_access_count);
+ CHECK_EQ(33, access_count);
// Force the call inline cache into dictionary probing mode.
CompileRun("o.f = function() {}; testCallNormal(o)");
// Test that the named access check is still called for each
// invocation of the function.
value = CompileRun("testCallNormal(obj)");
- CHECK_EQ(106, named_access_count);
-
- context1->Exit();
- context0->Exit();
-}
-
-
-static bool NamedAccessFlatten(Local<v8::Object> global,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
- char buf[100];
- int len;
-
- CHECK(name->IsString());
-
- memset(buf, 0x1, sizeof(buf));
- len = name.As<String>()->WriteOneByte(reinterpret_cast<uint8_t*>(buf));
- CHECK_EQ(4, len);
-
- uint16_t buf2[100];
-
- memset(buf, 0x1, sizeof(buf));
- len = name.As<String>()->Write(buf2);
- CHECK_EQ(4, len);
-
- return true;
-}
-
-
-static bool IndexedAccessFlatten(Local<v8::Object> global,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
- return true;
-}
-
-
-// Regression test. In access checks, operations that may cause
-// garbage collection are not allowed. It used to be the case that
-// using the Write operation on a string could cause a garbage
-// collection due to flattening of the string. This is no longer the
-// case.
-THREADED_TEST(AccessControlFlatten) {
- named_access_count = 0;
- indexed_access_count = 0;
-
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
-
- // Create an environment.
- v8::Local<Context> context0 = Context::New(isolate);
- context0->Enter();
-
- // Create an object that requires access-check functions to be
- // called for cross-domain access.
- v8::Handle<v8::ObjectTemplate> object_template =
- v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallbacks(NamedAccessFlatten,
- IndexedAccessFlatten);
- Local<v8::Object> object = object_template->NewInstance();
-
- v8::HandleScope scope1(isolate);
-
- // Create another environment.
- v8::Local<Context> context1 = Context::New(isolate);
- context1->Enter();
-
- // Make easy access to the object from the other environment.
- v8::Handle<v8::Object> global1 = context1->Global();
- global1->Set(v8_str("obj"), object);
-
- v8::Handle<Value> value;
-
- value = v8_compile("var p = 'as' + 'df';")->Run();
- value = v8_compile("obj[p];")->Run();
+ CHECK_EQ(43, access_count);
context1->Exit();
context0->Exit();
@@ -10869,16 +10972,29 @@ THREADED_TEST(VariousGetPropertiesAndThrowingCallbacks) {
try_catch.Reset();
CHECK(result.IsEmpty());
+ Maybe<PropertyAttribute> attr =
+ instance->GetRealNamedPropertyAttributes(v8_str("f"));
+ CHECK(!try_catch.HasCaught());
+ CHECK(Just(None) == attr);
+
result = another->GetRealNamedProperty(v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
CHECK(result.IsEmpty());
+ attr = another->GetRealNamedPropertyAttributes(v8_str("f"));
+ CHECK(!try_catch.HasCaught());
+ CHECK(Just(None) == attr);
+
result = another->GetRealNamedPropertyInPrototypeChain(v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
CHECK(result.IsEmpty());
+ attr = another->GetRealNamedPropertyAttributesInPrototypeChain(v8_str("f"));
+ CHECK(!try_catch.HasCaught());
+ CHECK(Just(None) == attr);
+
result = another->Get(v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
@@ -10889,6 +11005,10 @@ THREADED_TEST(VariousGetPropertiesAndThrowingCallbacks) {
try_catch.Reset();
CHECK(result.IsEmpty());
+ attr = with_js_getter->GetRealNamedPropertyAttributes(v8_str("f"));
+ CHECK(!try_catch.HasCaught());
+ CHECK(Just(None) == attr);
+
result = with_js_getter->Get(v8_str("f"));
CHECK(try_catch.HasCaught());
try_catch.Reset();
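
The interleaved additions exercise the new Maybe-returning attribute getters:
where GetRealNamedProperty comes back empty with an exception caught,
GetRealNamedPropertyAttributes (and its prototype-chain variant) completes
cleanly and reports Just(None). A small sketch of consuming the result,
assuming an object handle |obj| and the v8_str helper from this file:

  v8::Maybe<v8::PropertyAttribute> attr =
      obj->GetRealNamedPropertyAttributes(v8_str("f"));
  if (attr.IsJust()) {
    // v8::None means the property exists with none of the ReadOnly,
    // DontEnum, or DontDelete bits set.
    bool read_only = (attr.FromJust() & v8::ReadOnly) != 0;
    (void)read_only;
  }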
@@ -12676,61 +12796,13 @@ THREADED_TEST(PropertyEnumeration2) {
}
}
-static bool NamedSetAccessBlocker(Local<v8::Object> obj,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
- return type != v8::ACCESS_SET;
-}
-
-
-static bool IndexedSetAccessBlocker(Local<v8::Object> obj,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
- return type != v8::ACCESS_SET;
-}
-
-
-THREADED_TEST(DisableAccessChecksWhileConfiguring) {
- LocalContext context;
- v8::Isolate* isolate = context->GetIsolate();
- v8::HandleScope scope(isolate);
- Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessCheckCallbacks(NamedSetAccessBlocker,
- IndexedSetAccessBlocker);
- templ->Set(v8_str("x"), v8::True(isolate));
- Local<v8::Object> instance = templ->NewInstance();
- context->Global()->Set(v8_str("obj"), instance);
- Local<Value> value = CompileRun("obj.x");
- CHECK(value->BooleanValue());
-}
-
-
-static bool NamedGetAccessBlocker(Local<v8::Object> obj,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
- return false;
-}
-
-
-static bool IndexedGetAccessBlocker(Local<v8::Object> obj,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
- return false;
-}
-
-
THREADED_TEST(AccessChecksReenabledCorrectly) {
LocalContext context;
v8::Isolate* isolate = context->GetIsolate();
v8::HandleScope scope(isolate);
Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
- templ->SetAccessCheckCallbacks(NamedGetAccessBlocker,
- IndexedGetAccessBlocker);
+ templ->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
templ->Set(v8_str("a"), v8_str("a"));
// Add more than 8 (see kMaxFastProperties) properties
// so that the constructor will force copying map.
@@ -12762,27 +12834,6 @@ THREADED_TEST(AccessChecksReenabledCorrectly) {
}
-// This tests that access check information remains on the global
-// object template when creating contexts.
-THREADED_TEST(AccessControlRepeatedContextCreation) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedSetAccessBlocker,
- IndexedSetAccessBlocker);
- i::Handle<i::ObjectTemplateInfo> internal_template =
- v8::Utils::OpenHandle(*global_template);
- CHECK(!internal_template->constructor()->IsUndefined());
- i::Handle<i::FunctionTemplateInfo> constructor(
- i::FunctionTemplateInfo::cast(internal_template->constructor()));
- CHECK(!constructor->access_check_info()->IsUndefined());
- v8::Local<Context> context0(Context::New(isolate, NULL, global_template));
- CHECK(!context0.IsEmpty());
- CHECK(!constructor->access_check_info()->IsUndefined());
-}
-
-
THREADED_TEST(TurnOnAccessCheck) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
@@ -12791,10 +12842,8 @@ THREADED_TEST(TurnOnAccessCheck) {
// default.
v8::Handle<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedGetAccessBlocker,
- IndexedGetAccessBlocker,
- v8::Handle<v8::Value>(),
- false);
+ global_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL,
+ v8::Handle<v8::Value>(), false);
v8::Local<Context> context = Context::New(isolate, NULL, global_template);
Context::Scope context_scope(context);
@@ -12850,109 +12899,6 @@ THREADED_TEST(TurnOnAccessCheck) {
}
-static const char* kPropertyA = "a";
-static const char* kPropertyH = "h";
-
-static bool NamedGetAccessBlockAandH(Local<v8::Object> obj,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
- if (!name->IsString()) return false;
- i::Handle<i::String> name_handle =
- v8::Utils::OpenHandle(String::Cast(*name));
- return !name_handle->IsUtf8EqualTo(i::CStrVector(kPropertyA))
- && !name_handle->IsUtf8EqualTo(i::CStrVector(kPropertyH));
-}
-
-
-THREADED_TEST(TurnOnAccessCheckAndRecompile) {
- v8::Isolate* isolate = CcTest::isolate();
- v8::HandleScope handle_scope(isolate);
-
- // Create an environment with access check to the global object disabled by
- // default. When the registered access checker will block access to properties
- // a and h.
- v8::Handle<v8::ObjectTemplate> global_template =
- v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedGetAccessBlockAandH,
- IndexedGetAccessBlocker,
- v8::Handle<v8::Value>(),
- false);
- v8::Local<Context> context = Context::New(isolate, NULL, global_template);
- Context::Scope context_scope(context);
-
- // Set up a property and a number of functions.
- context->Global()->Set(v8_str("a"), v8_num(1));
- static const char* source = "function f1() {return a;}"
- "function f2() {return a;}"
- "function g1() {return h();}"
- "function g2() {return h();}"
- "function h() {return 1;}";
-
- CompileRun(source);
- Local<Function> f1;
- Local<Function> f2;
- Local<Function> g1;
- Local<Function> g2;
- Local<Function> h;
- f1 = Local<Function>::Cast(context->Global()->Get(v8_str("f1")));
- f2 = Local<Function>::Cast(context->Global()->Get(v8_str("f2")));
- g1 = Local<Function>::Cast(context->Global()->Get(v8_str("g1")));
- g2 = Local<Function>::Cast(context->Global()->Get(v8_str("g2")));
- h = Local<Function>::Cast(context->Global()->Get(v8_str("h")));
-
- // Get the global object.
- v8::Handle<v8::Object> global = context->Global();
-
- // Call f1 one time and f2 a number of times. This will ensure that f1 still
- // uses the runtime system to retrieve property a whereas f2 uses global load
- // inline cache.
- CHECK(f1->Call(global, 0, NULL)->Equals(v8_num(1)));
- for (int i = 0; i < 4; i++) {
- CHECK(f2->Call(global, 0, NULL)->Equals(v8_num(1)));
- }
-
- // Same for g1 and g2.
- CHECK(g1->Call(global, 0, NULL)->Equals(v8_num(1)));
- for (int i = 0; i < 4; i++) {
- CHECK(g2->Call(global, 0, NULL)->Equals(v8_num(1)));
- }
-
- // Detach the global and turn on access check now blocking access to property
- // a and function h.
- Local<Object> hidden_global = Local<Object>::Cast(
- context->Global()->GetPrototype());
- context->DetachGlobal();
- hidden_global->TurnOnAccessCheck();
-
- // Failing access check results in exception.
- CHECK(f1->Call(global, 0, NULL).IsEmpty());
- CHECK(f2->Call(global, 0, NULL).IsEmpty());
- CHECK(g1->Call(global, 0, NULL).IsEmpty());
- CHECK(g2->Call(global, 0, NULL).IsEmpty());
-
- // No failing access check when just returning a constant.
- CHECK(h->Call(global, 0, NULL)->Equals(v8_num(1)));
-
- // Now compile the source again. And get the newly compiled functions, except
- // for h for which access is blocked.
- CompileRun(source);
- f1 = Local<Function>::Cast(hidden_global->Get(v8_str("f1")));
- f2 = Local<Function>::Cast(hidden_global->Get(v8_str("f2")));
- g1 = Local<Function>::Cast(hidden_global->Get(v8_str("g1")));
- g2 = Local<Function>::Cast(hidden_global->Get(v8_str("g2")));
- CHECK(hidden_global->Get(v8_str("h")).IsEmpty());
-
- // Failing access check results in exception.
- v8::Local<v8::Value> result = f1->Call(global, 0, NULL);
- CHECK(result.IsEmpty());
- CHECK(f1->Call(global, 0, NULL).IsEmpty());
- CHECK(f2->Call(global, 0, NULL).IsEmpty());
- CHECK(g1->Call(global, 0, NULL).IsEmpty());
- CHECK(g2->Call(global, 0, NULL).IsEmpty());
-}
-
-
// Tests that ScriptData can be serialized and deserialized.
TEST(PreCompileSerialization) {
v8::V8::Initialize();
@@ -15223,6 +15169,54 @@ TEST(CaptureStackTraceForUncaughtExceptionAndSetters) {
}
+static void StackTraceFunctionNameListener(v8::Handle<v8::Message> message,
+ v8::Handle<Value>) {
+ v8::Handle<v8::StackTrace> stack_trace = message->GetStackTrace();
+ CHECK_EQ(5, stack_trace->GetFrameCount());
+ checkStackFrame("origin", "foo:0", 4, 7, false, false,
+ stack_trace->GetFrame(0));
+ checkStackFrame("origin", "foo:1", 5, 27, false, false,
+ stack_trace->GetFrame(1));
+ checkStackFrame("origin", "foo", 5, 27, false, false,
+ stack_trace->GetFrame(2));
+ checkStackFrame("origin", "foo", 5, 27, false, false,
+ stack_trace->GetFrame(3));
+ checkStackFrame("origin", "", 1, 14, false, false, stack_trace->GetFrame(4));
+}
+
+
+TEST(GetStackTraceContainsFunctionsWithFunctionName) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ CompileRunWithOrigin(
+ "function gen(name, counter) {\n"
+ " var f = function foo() {\n"
+ " if (counter === 0)\n"
+ " throw 1;\n"
+ " gen(name, counter - 1)();\n"
+ " };\n"
+ " if (counter == 3) {\n"
+ " Object.defineProperty(f, 'name', {get: function(){ throw 239; }});\n"
+ " } else {\n"
+ " Object.defineProperty(f, 'name', {writable:true});\n"
+ " if (counter == 2)\n"
+ " f.name = 42;\n"
+ " else\n"
+ " f.name = name + ':' + counter;\n"
+ " }\n"
+ " return f;\n"
+ "};",
+ "origin");
+
+ v8::V8::AddMessageListener(StackTraceFunctionNameListener);
+ v8::V8::SetCaptureStackTraceForUncaughtExceptions(true);
+ CompileRunWithOrigin("gen('foo', 3)();", "origin");
+ v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
+ v8::V8::RemoveMessageListeners(StackTraceFunctionNameListener);
+}
+
+
static void RethrowStackTraceHandler(v8::Handle<v8::Message> message,
v8::Handle<v8::Value> data) {
// Use the frame where JavaScript is called from.
@@ -15991,30 +15985,10 @@ static void CreateGarbageInOldSpace() {
}
-// Test that idle notification can be handled and eventually returns true.
-TEST(IdleNotification) {
- const intptr_t MB = 1024 * 1024;
- const int IdlePauseInMs = 1000;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- intptr_t initial_size = CcTest::heap()->SizeOfObjects();
- CreateGarbageInOldSpace();
- intptr_t size_with_garbage = CcTest::heap()->SizeOfObjects();
- CHECK_GT(size_with_garbage, initial_size + MB);
- bool finished = false;
- for (int i = 0; i < 200 && !finished; i++) {
- finished = env->GetIsolate()->IdleNotification(IdlePauseInMs);
- }
- intptr_t final_size = CcTest::heap()->SizeOfObjects();
- CHECK(finished);
- CHECK_LT(final_size, initial_size + 1);
-}
-
-
// Test that idle notification can be handled and eventually collects garbage.
-TEST(IdleNotificationWithSmallHint) {
+TEST(TestIdleNotification) {
const intptr_t MB = 1024 * 1024;
- const int IdlePauseInMs = 900;
+ const double IdlePauseInSeconds = 1.0;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
intptr_t initial_size = CcTest::heap()->SizeOfObjects();
@@ -16023,27 +15997,10 @@ TEST(IdleNotificationWithSmallHint) {
CHECK_GT(size_with_garbage, initial_size + MB);
bool finished = false;
for (int i = 0; i < 200 && !finished; i++) {
- finished = env->GetIsolate()->IdleNotification(IdlePauseInMs);
- }
- intptr_t final_size = CcTest::heap()->SizeOfObjects();
- CHECK(finished);
- CHECK_LT(final_size, initial_size + 1);
-}
-
-
-// Test that idle notification can be handled and eventually collects garbage.
-TEST(IdleNotificationWithLargeHint) {
- const intptr_t MB = 1024 * 1024;
- const int IdlePauseInMs = 900;
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
- intptr_t initial_size = CcTest::heap()->SizeOfObjects();
- CreateGarbageInOldSpace();
- intptr_t size_with_garbage = CcTest::heap()->SizeOfObjects();
- CHECK_GT(size_with_garbage, initial_size + MB);
- bool finished = false;
- for (int i = 0; i < 200 && !finished; i++) {
- finished = env->GetIsolate()->IdleNotification(IdlePauseInMs);
+ finished = env->GetIsolate()->IdleNotificationDeadline(
+ (v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
+ IdlePauseInSeconds);
}
intptr_t final_size = CcTest::heap()->SizeOfObjects();
CHECK(finished);
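
The three hint-based IdleNotification tests collapse into one:
IdleNotification(idle_time_in_ms) is replaced by
IdleNotificationDeadline(deadline_in_seconds), which takes an absolute deadline
on the monotonic clock; the test derives that clock from v8::base only because
cctest links against internals. A hedged sketch of the embedder-side call,
assuming a v8::Platform* named |platform| whose MonotonicallyIncreasingTime()
supplies the same clock:

  // Offer V8 up to 10ms of idle work, expressed as an absolute deadline.
  double deadline_s = platform->MonotonicallyIncreasingTime() + 0.010;
  bool done = isolate->IdleNotificationDeadline(deadline_s);
  // |done| is true once V8 believes there is no more idle work to do.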
@@ -16188,7 +16145,7 @@ TEST(ExternalizeOldSpaceTwoByteCons) {
CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
CcTest::heap()->CollectAllAvailableGarbage();
CHECK(CcTest::heap()->old_pointer_space()->Contains(
- *v8::Utils::OpenHandle(*cons)));
+ *v8::Utils::OpenHandle(*cons)));
TestResource* resource = new TestResource(
AsciiToTwoByteString("Romeo Montague Juliet Capulet"));
@@ -16211,7 +16168,7 @@ TEST(ExternalizeOldSpaceOneByteCons) {
CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
CcTest::heap()->CollectAllAvailableGarbage();
CHECK(CcTest::heap()->old_pointer_space()->Contains(
- *v8::Utils::OpenHandle(*cons)));
+ *v8::Utils::OpenHandle(*cons)));
TestOneByteResource* resource =
new TestOneByteResource(i::StrDup("Romeo Montague Juliet Capulet"));
@@ -16501,6 +16458,7 @@ THREADED_TEST(SpaghettiStackReThrow) {
TEST(Regress528) {
v8::V8::Initialize();
v8::Isolate* isolate = CcTest::isolate();
+ i::FLAG_retain_maps_for_n_gc = 0;
v8::HandleScope scope(isolate);
v8::Local<Context> other_context;
int gc_count;
@@ -16593,7 +16551,8 @@ THREADED_TEST(ScriptOrigin) {
v8::String::NewFromUtf8(env->GetIsolate(), "test"),
v8::Integer::New(env->GetIsolate(), 1),
v8::Integer::New(env->GetIsolate(), 1), v8::True(env->GetIsolate()),
- v8::Handle<v8::Integer>(), v8::True(env->GetIsolate()));
+ v8::Handle<v8::Integer>(), v8::True(env->GetIsolate()),
+ v8::String::NewFromUtf8(env->GetIsolate(), "http://sourceMapUrl"));
v8::Handle<v8::String> script = v8::String::NewFromUtf8(
env->GetIsolate(), "function f() {}\n\nfunction g() {}");
v8::Script::Compile(script, &origin)->Run();
@@ -16608,6 +16567,10 @@ THREADED_TEST(ScriptOrigin) {
CHECK_EQ(1, script_origin_f.ResourceLineOffset()->Int32Value());
CHECK(script_origin_f.ResourceIsSharedCrossOrigin()->Value());
CHECK(script_origin_f.ResourceIsEmbedderDebugScript()->Value());
+ printf("is name = %d\n", script_origin_f.SourceMapUrl()->IsUndefined());
+
+ CHECK_EQ(0, strcmp("http://sourceMapUrl",
+ *v8::String::Utf8Value(script_origin_f.SourceMapUrl())));
v8::ScriptOrigin script_origin_g = g->GetScriptOrigin();
CHECK_EQ(0, strcmp("test",
@@ -16615,6 +16578,8 @@ THREADED_TEST(ScriptOrigin) {
CHECK_EQ(1, script_origin_g.ResourceLineOffset()->Int32Value());
CHECK(script_origin_g.ResourceIsSharedCrossOrigin()->Value());
CHECK(script_origin_g.ResourceIsEmbedderDebugScript()->Value());
+ CHECK_EQ(0, strcmp("http://sourceMapUrl",
+ *v8::String::Utf8Value(script_origin_g.SourceMapUrl())));
}
@@ -17457,10 +17422,8 @@ TEST(GCInFailedAccessCheckCallback) {
// check callbacks that will block access.
v8::Handle<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedGetAccessBlocker,
- IndexedGetAccessBlocker,
- v8::Handle<v8::Value>(),
- false);
+ global_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL,
+ v8::Handle<v8::Value>(), false);
// Create a context and set an x property on its global object.
LocalContext context0(NULL, global_template);
@@ -18230,18 +18193,22 @@ THREADED_TEST(CreationContext) {
instance2 = func2->NewInstance();
}
- CHECK(object1->CreationContext() == context1);
- CheckContextId(object1, 1);
- CHECK(func1->CreationContext() == context1);
- CheckContextId(func1, 1);
- CHECK(instance1->CreationContext() == context1);
- CheckContextId(instance1, 1);
- CHECK(object2->CreationContext() == context2);
- CheckContextId(object2, 2);
- CHECK(func2->CreationContext() == context2);
- CheckContextId(func2, 2);
- CHECK(instance2->CreationContext() == context2);
- CheckContextId(instance2, 2);
+ {
+ Handle<Context> other_context = Context::New(isolate);
+ Context::Scope scope(other_context);
+ CHECK(object1->CreationContext() == context1);
+ CheckContextId(object1, 1);
+ CHECK(func1->CreationContext() == context1);
+ CheckContextId(func1, 1);
+ CHECK(instance1->CreationContext() == context1);
+ CheckContextId(instance1, 1);
+ CHECK(object2->CreationContext() == context2);
+ CheckContextId(object2, 2);
+ CHECK(func2->CreationContext() == context2);
+ CheckContextId(func2, 2);
+ CHECK(instance2->CreationContext() == context2);
+ CheckContextId(instance2, 2);
+ }
{
Context::Scope scope(context1);
@@ -18288,6 +18255,8 @@ THREADED_TEST(CreationContextOfJsFunction) {
function = CompileRun("function foo() {}; foo").As<Object>();
}
+ Handle<Context> other_context = Context::New(CcTest::isolate());
+ Context::Scope scope(other_context);
CHECK(function->CreationContext() == context);
CheckContextId(function, 1);
}
@@ -18596,33 +18565,13 @@ THREADED_TEST(Regress1516) {
}
-static bool BlockProtoNamedSecurityTestCallback(Local<v8::Object> global,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
- // Only block read access to __proto__.
- if (type == v8::ACCESS_GET && name->IsString() &&
- name.As<v8::String>()->Length() == 9 &&
- name.As<v8::String>()->Utf8Length() == 9) {
- char buffer[10];
- CHECK_EQ(10, name.As<v8::String>()->WriteUtf8(buffer));
- return strncmp(buffer, "__proto__", 9) != 0;
- }
-
- return true;
-}
-
-
THREADED_TEST(Regress93759) {
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
// Template for object with security check.
Local<ObjectTemplate> no_proto_template = v8::ObjectTemplate::New(isolate);
- // We don't do indexing, so any callback can be used for that.
- no_proto_template->SetAccessCheckCallbacks(
- BlockProtoNamedSecurityTestCallback,
- IndexedSecurityTestCallback);
+ no_proto_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
// Templates for objects with hidden prototypes and possibly security check.
Local<FunctionTemplate> hidden_proto_template =
@@ -18632,8 +18581,7 @@ THREADED_TEST(Regress93759) {
Local<FunctionTemplate> protected_hidden_proto_template =
v8::FunctionTemplate::New(isolate);
protected_hidden_proto_template->InstanceTemplate()->SetAccessCheckCallbacks(
- BlockProtoNamedSecurityTestCallback,
- IndexedSecurityTestCallback);
+ AccessAlwaysBlocked, NULL);
protected_hidden_proto_template->SetHiddenPrototype(true);
// Context for "foreign" objects used in test.
@@ -18644,12 +18592,10 @@ THREADED_TEST(Regress93759) {
Local<Object> simple_object = Object::New(isolate);
// Object with explicit security check.
- Local<Object> protected_object =
- no_proto_template->NewInstance();
+ Local<Object> protected_object = no_proto_template->NewInstance();
// JSGlobalProxy object, always have security check.
- Local<Object> proxy_object =
- context->Global();
+ Local<Object> proxy_object = context->Global();
// Global object, the prototype of proxy_object. No security checks.
Local<Object> global_object = proxy_object->GetPrototype()->ToObject(isolate);
@@ -19815,24 +19761,6 @@ THREADED_TEST(SemaphoreInterruption) {
#endif // V8_OS_POSIX
-static bool NamedAccessAlwaysBlocked(Local<v8::Object> global,
- Local<Value> name,
- v8::AccessType type,
- Local<Value> data) {
- i::PrintF("Named access blocked.\n");
- return false;
-}
-
-
-static bool IndexAccessAlwaysBlocked(Local<v8::Object> global,
- uint32_t key,
- v8::AccessType type,
- Local<Value> data) {
- i::PrintF("Indexed access blocked.\n");
- return false;
-}
-
-
void UnreachableCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
CHECK(false);
}
@@ -19847,8 +19775,7 @@ TEST(JSONStringifyAccessCheck) {
// check callbacks that will block access.
v8::Handle<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedAccessAlwaysBlocked,
- IndexAccessAlwaysBlocked);
+ global_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
// Create a context and set an x property on it's global object.
LocalContext context0(NULL, global_template);
@@ -19946,8 +19873,7 @@ TEST(AccessCheckThrows) {
// check callbacks that will block access.
v8::Handle<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate);
- global_template->SetAccessCheckCallbacks(NamedAccessAlwaysBlocked,
- IndexAccessAlwaysBlocked);
+ global_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
// Create a context and set an x property on it's global object.
LocalContext context0(NULL, global_template);
@@ -20100,6 +20026,7 @@ class RequestInterruptTestWithFunctionCall
isolate_, ShouldContinueCallback, v8::External::New(isolate_, this));
env_->Global()->Set(v8_str("ShouldContinue"), func);
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("while (ShouldContinue()) { }");
}
};
@@ -20115,6 +20042,7 @@ class RequestInterruptTestWithMethodCall
isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("var obj = new Klass; while (obj.shouldContinue()) { }");
}
};
@@ -20130,6 +20058,7 @@ class RequestInterruptTestWithAccessor
isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("var obj = new Klass; while (obj.shouldContinue) { }");
}
};
@@ -20147,6 +20076,7 @@ class RequestInterruptTestWithNativeAccessor
v8::External::New(isolate_, this));
env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("var obj = new Klass; while (obj.shouldContinue) { }");
}
@@ -20176,6 +20106,7 @@ class RequestInterruptTestWithMethodCallAndInterceptor
env_->Global()->Set(v8_str("Klass"), t->GetFunction());
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("var obj = new Klass; while (obj.shouldContinue()) { }");
}
@@ -20200,6 +20131,7 @@ class RequestInterruptTestWithMathAbs
v8::External::New(isolate_, this)));
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("function loopish(o) {"
" var pre = 10;"
" while (o.abs(1) > 0) {"
@@ -20283,6 +20215,7 @@ class RequestMultipleInterrupts : public RequestInterruptTestBase {
isolate_, ShouldContinueCallback, v8::External::New(isolate_, this));
env_->Global()->Set(v8_str("ShouldContinue"), func);
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("while (ShouldContinue()) { }");
}
@@ -21002,42 +20935,42 @@ TEST(Regress354123) {
v8::HandleScope scope(isolate);
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
- templ->SetAccessCheckCallbacks(NamedAccessCounter, IndexedAccessCounter);
+ templ->SetAccessCheckCallbacks(AccessCounter, NULL);
current->Global()->Set(v8_str("friend"), templ->NewInstance());
// Test access using __proto__ from the prototype chain.
- named_access_count = 0;
+ access_count = 0;
CompileRun("friend.__proto__ = {};");
- CHECK_EQ(2, named_access_count);
+ CHECK_EQ(2, access_count);
CompileRun("friend.__proto__;");
- CHECK_EQ(4, named_access_count);
+ CHECK_EQ(4, access_count);
// Test access using __proto__ as a hijacked function (A).
- named_access_count = 0;
+ access_count = 0;
CompileRun("var p = Object.prototype;"
"var f = Object.getOwnPropertyDescriptor(p, '__proto__').set;"
"f.call(friend, {});");
- CHECK_EQ(1, named_access_count);
+ CHECK_EQ(1, access_count);
CompileRun("var p = Object.prototype;"
"var f = Object.getOwnPropertyDescriptor(p, '__proto__').get;"
"f.call(friend);");
- CHECK_EQ(2, named_access_count);
+ CHECK_EQ(2, access_count);
// Test access using __proto__ as a hijacked function (B).
- named_access_count = 0;
+ access_count = 0;
CompileRun("var f = Object.prototype.__lookupSetter__('__proto__');"
"f.call(friend, {});");
- CHECK_EQ(1, named_access_count);
+ CHECK_EQ(1, access_count);
CompileRun("var f = Object.prototype.__lookupGetter__('__proto__');"
"f.call(friend);");
- CHECK_EQ(2, named_access_count);
+ CHECK_EQ(2, access_count);
// Test access using Object.setPrototypeOf reflective method.
- named_access_count = 0;
+ access_count = 0;
CompileRun("Object.setPrototypeOf(friend, {});");
- CHECK_EQ(1, named_access_count);
+ CHECK_EQ(1, access_count);
CompileRun("Object.getPrototypeOf(friend);");
- CHECK_EQ(2, named_access_count);
+ CHECK_EQ(2, access_count);
}
@@ -21192,8 +21125,7 @@ TEST(Regress411877) {
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallbacks(NamedAccessCounter,
- IndexedAccessCounter);
+ object_template->SetAccessCheckCallbacks(AccessCounter, NULL);
v8::Handle<Context> context = Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -21208,8 +21140,7 @@ TEST(GetHiddenPropertyTableAfterAccessCheck) {
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallbacks(NamedAccessCounter,
- IndexedAccessCounter);
+ object_template->SetAccessCheckCallbacks(AccessCounter, NULL);
v8::Handle<Context> context = Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -21227,8 +21158,7 @@ TEST(Regress411793) {
v8::HandleScope handle_scope(isolate);
v8::Handle<v8::ObjectTemplate> object_template =
v8::ObjectTemplate::New(isolate);
- object_template->SetAccessCheckCallbacks(NamedAccessCounter,
- IndexedAccessCounter);
+ object_template->SetAccessCheckCallbacks(AccessCounter, NULL);
v8::Handle<Context> context = Context::New(isolate);
v8::Context::Scope context_scope(context);
@@ -21806,6 +21736,7 @@ TEST(TurboAsmDisablesNeuter) {
"Module(this, {}, buffer).load();"
"buffer";
+ i::FLAG_turbo_osr = false; // TODO(titzer): test requires eager TF.
v8::Local<v8::ArrayBuffer> result = CompileRun(load).As<v8::ArrayBuffer>();
CHECK_EQ(should_be_neuterable, result->IsNeuterable());
@@ -21820,6 +21751,7 @@ TEST(TurboAsmDisablesNeuter) {
"Module(this, {}, buffer).store();"
"buffer";
+ i::FLAG_turbo_osr = false; // TODO(titzer): test requires eager TF.
result = CompileRun(store).As<v8::ArrayBuffer>();
CHECK_EQ(should_be_neuterable, result->IsNeuterable());
}
@@ -21833,8 +21765,7 @@ TEST(GetPrototypeAccessControl) {
v8::Handle<v8::ObjectTemplate> obj_template =
v8::ObjectTemplate::New(isolate);
- obj_template->SetAccessCheckCallbacks(BlockEverythingNamed,
- BlockEverythingIndexed);
+ obj_template->SetAccessCheckCallbacks(AccessAlwaysBlocked, NULL);
env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
@@ -21917,7 +21848,6 @@ TEST(StreamingScriptWithSourceMappingURLInTheMiddle) {
TEST(NewStringRangeError) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope handle_scope(isolate);
- LocalContext env;
const int length = i::String::kMaxLength + 1;
const int buffer_size = length * sizeof(uint16_t);
void* buffer = malloc(buffer_size);
@@ -21928,21 +21858,21 @@ TEST(NewStringRangeError) {
char* data = reinterpret_cast<char*>(buffer);
CHECK(v8::String::NewFromUtf8(isolate, data, v8::String::kNormalString,
length).IsEmpty());
- CHECK(try_catch.HasCaught());
+ CHECK(!try_catch.HasCaught());
}
{
v8::TryCatch try_catch;
uint8_t* data = reinterpret_cast<uint8_t*>(buffer);
CHECK(v8::String::NewFromOneByte(isolate, data, v8::String::kNormalString,
length).IsEmpty());
- CHECK(try_catch.HasCaught());
+ CHECK(!try_catch.HasCaught());
}
{
v8::TryCatch try_catch;
uint16_t* data = reinterpret_cast<uint16_t*>(buffer);
CHECK(v8::String::NewFromTwoByte(isolate, data, v8::String::kNormalString,
length).IsEmpty());
- CHECK(try_catch.HasCaught());
+ CHECK(!try_catch.HasCaught());
}
free(buffer);
}
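
The flipped expectations record a behavioral change: allocating a string longer
than i::String::kMaxLength through these non-maybe NewFrom* entry points now
yields an empty handle with no exception scheduled, so the TryCatch stays empty
and callers must test the handle instead. A sketch, assuming |isolate|, |data|,
and |length| as set up in the test above:

  v8::Local<v8::String> s = v8::String::NewFromUtf8(
      isolate, data, v8::String::kNormalString, length);
  if (s.IsEmpty()) {
    // Over-long input: no JavaScript exception is pending; fail locally.
  }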
diff --git a/deps/v8/test/cctest/test-array-list.cc b/deps/v8/test/cctest/test-array-list.cc
new file mode 100644
index 0000000000..2852043b2f
--- /dev/null
+++ b/deps/v8/test/cctest/test-array-list.cc
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "test/cctest/cctest.h"
+
+namespace {
+
+using namespace v8::internal;
+
+
+TEST(ArrayList) {
+ LocalContext context;
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Handle<ArrayList> array(
+ ArrayList::cast(isolate->heap()->empty_fixed_array()));
+ CHECK_EQ(0, array->Length());
+ array = ArrayList::Add(array, handle(Smi::FromInt(100), isolate));
+ CHECK_EQ(1, array->Length());
+ CHECK_EQ(100, Smi::cast(array->Get(0))->value());
+ array = ArrayList::Add(array, handle(Smi::FromInt(200), isolate),
+ handle(Smi::FromInt(300), isolate));
+ CHECK_EQ(3, array->Length());
+ CHECK_EQ(100, Smi::cast(array->Get(0))->value());
+ CHECK_EQ(200, Smi::cast(array->Get(1))->value());
+ CHECK_EQ(300, Smi::cast(array->Get(2))->value());
+ array->Set(2, Smi::FromInt(400));
+ CHECK_EQ(400, Smi::cast(array->Get(2))->value());
+ array->Clear(2, isolate->heap()->undefined_value());
+ array->SetLength(2);
+ CHECK_EQ(2, array->Length());
+ CHECK_EQ(100, Smi::cast(array->Get(0))->value());
+ CHECK_EQ(200, Smi::cast(array->Get(1))->value());
+}
+}
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index df8477ba9f..e54c4894b7 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -11223,3 +11223,173 @@ TEST(pool_size) {
TEARDOWN();
}
+
+
+TEST(jump_tables_forward) {
+ // Test jump tables with forward jumps.
+ const int kNumCases = 512;
+
+ INIT_V8();
+ SETUP_SIZE(kNumCases * 5 * kInstructionSize + 8192);
+ START();
+
+ int32_t values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ int32_t results[kNumCases];
+ memset(results, 0, sizeof(results));
+ uintptr_t results_ptr = reinterpret_cast<uintptr_t>(results);
+
+ Label loop;
+ Label labels[kNumCases];
+ Label done;
+
+ const Register& index = x0;
+ STATIC_ASSERT(sizeof(results[0]) == 4);
+ const Register& value = w1;
+ const Register& target = x2;
+
+ __ Mov(index, 0);
+ __ Mov(target, results_ptr);
+ __ Bind(&loop);
+
+ {
+ Assembler::BlockPoolsScope block_pools(&masm);
+ Label base;
+
+ __ Adr(x10, &base);
+ __ Ldr(x11, MemOperand(x10, index, LSL, kPointerSizeLog2));
+ __ Br(x11);
+ __ Bind(&base);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dcptr(&labels[i]);
+ }
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ Bind(&labels[i]);
+ __ Mov(value, values[i]);
+ __ B(&done);
+ }
+
+ __ Bind(&done);
+ __ Str(value, MemOperand(target, 4, PostIndex));
+ __ Add(index, index, 1);
+ __ Cmp(index, kNumCases);
+ __ B(ne, &loop);
+
+ END();
+
+ RUN();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ CHECK_EQ(values[i], results[i]);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(jump_tables_backward) {
+ // Test jump tables with backward jumps.
+ const int kNumCases = 512;
+
+ INIT_V8();
+ SETUP_SIZE(kNumCases * 5 * kInstructionSize + 8192);
+ START();
+
+ int32_t values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ int32_t results[kNumCases];
+ memset(results, 0, sizeof(results));
+ uintptr_t results_ptr = reinterpret_cast<uintptr_t>(results);
+
+ Label loop;
+ Label labels[kNumCases];
+ Label done;
+
+ const Register& index = x0;
+ STATIC_ASSERT(sizeof(results[0]) == 4);
+ const Register& value = w1;
+ const Register& target = x2;
+
+ __ Mov(index, 0);
+ __ Mov(target, results_ptr);
+ __ B(&loop);
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ Bind(&labels[i]);
+ __ Mov(value, values[i]);
+ __ B(&done);
+ }
+
+ __ Bind(&loop);
+ {
+ Assembler::BlockPoolsScope block_pools(&masm);
+ Label base;
+
+ __ Adr(x10, &base);
+ __ Ldr(x11, MemOperand(x10, index, LSL, kPointerSizeLog2));
+ __ Br(x11);
+ __ Bind(&base);
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dcptr(&labels[i]);
+ }
+ }
+
+ __ Bind(&done);
+ __ Str(value, MemOperand(target, 4, PostIndex));
+ __ Add(index, index, 1);
+ __ Cmp(index, kNumCases);
+ __ B(ne, &loop);
+
+ END();
+
+ RUN();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ CHECK_EQ(values[i], results[i]);
+ }
+
+ TEARDOWN();
+}
+
+
+TEST(internal_reference_linked) {
+ // Test internal references when they are linked in a label chain.
+
+ INIT_V8();
+ SETUP();
+ START();
+
+ Label done;
+
+ __ Mov(x0, 0);
+ __ Cbnz(x0, &done);
+
+ {
+ Assembler::BlockPoolsScope block_pools(&masm);
+ Label base;
+
+ __ Adr(x10, &base);
+ __ Ldr(x11, MemOperand(x10));
+ __ Br(x11);
+ __ Bind(&base);
+ __ dcptr(&done);
+ }
+
+ // Dead code, just to extend the label chain.
+ __ B(&done);
+ __ dcptr(&done);
+ __ Tbz(x0, 1, &done);
+
+ __ Bind(&done);
+ __ Mov(x0, 1);
+
+ END();
+
+ RUN();
+
+ CHECK_EQUAL_64(0x1, x0);
+
+ TEARDOWN();
+}
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 46592a05d1..5a7ad0294a 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -34,7 +34,6 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index d4cabbcf72..e55fb24282 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -1585,7 +1585,7 @@ TEST(jump_tables3) {
__ bind(&done);
__ ld(ra, MemOperand(sp));
- __ addiu(sp, sp, 8);
+ __ daddiu(sp, sp, 8);
__ jr(ra);
__ nop();
@@ -1594,7 +1594,7 @@ TEST(jump_tables3) {
Handle<Code> code = isolate->factory()->NewCode(
desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
#ifdef OBJECT_PRINT
-// code->Print(std::cout);
+ code->Print(std::cout);
#endif
F1 f = FUNCTION_CAST<F1>(code->entry());
for (int i = 0; i < kNumCases; ++i) {
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index f5d59ded9b..ca88309bb6 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -34,7 +34,6 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-assembler-x87.cc b/deps/v8/test/cctest/test-assembler-x87.cc
index c07be845b9..f83bbbd0d8 100644
--- a/deps/v8/test/cctest/test-assembler-x87.cc
+++ b/deps/v8/test/cctest/test-assembler-x87.cc
@@ -34,7 +34,6 @@
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index faf533239e..ed8b1c6d99 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -60,7 +60,7 @@ static Handle<JSFunction> Compile(const char* source) {
Handle<String> source_code = isolate->factory()->NewStringFromUtf8(
CStrVector(source)).ToHandleChecked();
Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
- source_code, Handle<String>(), 0, 0, false, false,
+ source_code, Handle<String>(), 0, 0, false, false, Handle<Object>(),
Handle<Context>(isolate->native_context()), NULL, NULL,
v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE, false);
return isolate->factory()->NewFunctionFromSharedFunctionInfo(
diff --git a/deps/v8/test/cctest/test-constantpool.cc b/deps/v8/test/cctest/test-constantpool.cc
index 453657609e..8b9b2cf73a 100644
--- a/deps/v8/test/cctest/test-constantpool.cc
+++ b/deps/v8/test/cctest/test-constantpool.cc
@@ -284,8 +284,9 @@ TEST(ConstantPoolCompacting) {
Page* first_page = heap->old_data_space()->anchor()->next_page();
{
HandleScope scope(isolate);
+ int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
Handle<HeapObject> temp =
- factory->NewFixedDoubleArray(900 * KB / kDoubleSize, TENURED);
+ factory->NewFixedDoubleArray(dummy_array_size / kDoubleSize, TENURED);
CHECK(heap->InOldDataSpace(temp->address()));
Handle<HeapObject> heap_ptr =
factory->NewHeapNumber(5.0, IMMUTABLE, TENURED);
diff --git a/deps/v8/test/cctest/test-conversions.cc b/deps/v8/test/cctest/test-conversions.cc
index b7881edcf6..789425119d 100644
--- a/deps/v8/test/cctest/test-conversions.cc
+++ b/deps/v8/test/cctest/test-conversions.cc
@@ -362,3 +362,58 @@ TEST(BitField64) {
CHECK(x == MiddleBits::encode(3));
CHECK_EQ(3, MiddleBits::decode(x));
}
+
+
+static void CheckNonArrayIndex(bool expected, const char* chars) {
+ auto isolate = CcTest::i_isolate();
+ auto string = isolate->factory()->NewStringFromAsciiChecked(chars);
+ CHECK_EQ(expected, IsNonArrayIndexInteger(*string));
+}
+
+
+TEST(NonArrayIndexParsing) {
+ auto isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ CheckNonArrayIndex(false, "");
+ CheckNonArrayIndex(false, "-");
+ CheckNonArrayIndex(false, "0");
+ CheckNonArrayIndex(false, "01");
+ CheckNonArrayIndex(false, "-01");
+ CheckNonArrayIndex(false, "4294967295");
+ CheckNonArrayIndex(false, "429496.7295");
+ CheckNonArrayIndex(false, "43s3");
+ CheckNonArrayIndex(true, "-0");
+ CheckNonArrayIndex(true, "-1");
+ CheckNonArrayIndex(true, "4294967296");
+ CheckNonArrayIndex(true, "-4294967296");
+ CheckNonArrayIndex(
+ true,
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296");
+ CheckNonArrayIndex(
+ true,
+ "-429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296"
+ "429496729642949672964294967296429496729642949672964294967296");
+}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index e2b6db0b96..a8cbdd505c 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -56,6 +56,16 @@ static v8::Local<v8::Function> GetFunction(v8::Context* env, const char* name) {
}
+static int offset(const char* src, const char* substring) {
+ return static_cast<int>(strstr(src, substring) - src);
+}
+
+
+static const char* reason(const i::Deoptimizer::DeoptReason reason) {
+ return i::Deoptimizer::GetDeoptReason(reason);
+}
+
+
TEST(StartStop) {
i::Isolate* isolate = CcTest::i_isolate();
CpuProfilesCollection profiles(isolate->heap());
@@ -433,8 +443,7 @@ static v8::CpuProfile* RunProfiler(
static bool ContainsString(v8::Handle<v8::String> string,
const Vector<v8::Handle<v8::String> >& vector) {
for (int i = 0; i < vector.length(); i++) {
- if (string->Equals(vector[i]))
- return true;
+ if (string->Equals(vector[i])) return true;
}
return false;
}
@@ -445,18 +454,31 @@ static void CheckChildrenNames(const v8::CpuProfileNode* node,
int count = node->GetChildrenCount();
for (int i = 0; i < count; i++) {
v8::Handle<v8::String> name = node->GetChild(i)->GetFunctionName();
- CHECK(ContainsString(name, names));
+ if (!ContainsString(name, names)) {
+ char buffer[100];
+ i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
+ "Unexpected child '%s' found in '%s'",
+ *v8::String::Utf8Value(name),
+ *v8::String::Utf8Value(node->GetFunctionName()));
+ FATAL(buffer);
+ }
// Check that there are no duplicates.
for (int j = 0; j < count; j++) {
if (j == i) continue;
- CHECK(!name->Equals(node->GetChild(j)->GetFunctionName()));
+ if (name->Equals(node->GetChild(j)->GetFunctionName())) {
+ char buffer[100];
+ i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
+ "Second child with the same name '%s' found in '%s'",
+ *v8::String::Utf8Value(name),
+ *v8::String::Utf8Value(node->GetFunctionName()));
+ FATAL(buffer);
+ }
}
}
}
-static const v8::CpuProfileNode* FindChild(v8::Isolate* isolate,
- const v8::CpuProfileNode* node,
+static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
const char* name) {
int count = node->GetChildrenCount();
v8::Handle<v8::String> nameHandle = v8_str(name);
@@ -468,10 +490,9 @@ static const v8::CpuProfileNode* FindChild(v8::Isolate* isolate,
}
-static const v8::CpuProfileNode* GetChild(v8::Isolate* isolate,
- const v8::CpuProfileNode* node,
+static const v8::CpuProfileNode* GetChild(const v8::CpuProfileNode* node,
const char* name) {
- const v8::CpuProfileNode* result = FindChild(isolate, node, name);
+ const v8::CpuProfileNode* result = FindChild(node, name);
if (!result) {
char buffer[100];
i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
@@ -482,26 +503,24 @@ static const v8::CpuProfileNode* GetChild(v8::Isolate* isolate,
}
-static void CheckSimpleBranch(v8::Isolate* isolate,
- const v8::CpuProfileNode* node,
+static void CheckSimpleBranch(const v8::CpuProfileNode* node,
const char* names[], int length) {
for (int i = 0; i < length; i++) {
const char* name = names[i];
- node = GetChild(isolate, node, name);
+ node = GetChild(node, name);
int expectedChildrenCount = (i == length - 1) ? 0 : 1;
CHECK_EQ(expectedChildrenCount, node->GetChildrenCount());
}
}
-static const v8::CpuProfileNode* GetSimpleBranch(v8::Isolate* isolate,
- const v8::CpuProfileNode* node,
- const char* names[],
- int length) {
+static const ProfileNode* GetSimpleBranch(v8::CpuProfile* profile,
+ const char* names[], int length) {
+ const v8::CpuProfileNode* node = profile->GetTopDownRoot();
for (int i = 0; i < length; i++) {
- node = GetChild(isolate, node, names[i]);
+ node = GetChild(node, names[i]);
}
- return node;
+ return reinterpret_cast<const ProfileNode*>(node);
}
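
The refactor above drops the unused v8::Isolate* parameter from FindChild, GetChild and CheckSimpleBranch, and retargets GetSimpleBranch at the profile's top-down root, returning the internal ProfileNode so deopt data can be inspected directly. As a rough sketch of how the slimmed-down helpers compose in a test body (assuming a v8::CpuProfile* `profile` collected by RunProfiler, as in the tests further down; the node names are placeholders):

    // Sketch only: `profile` comes from RunProfiler; helpers as defined above.
    const v8::CpuProfileNode* root = profile->GetTopDownRoot();
    const v8::CpuProfileNode* startNode = GetChild(root, "start");    // FATALs if absent
    const v8::CpuProfileNode* fooNode = FindChild(startNode, "foo");  // NULL if absent
    CHECK(fooNode);
    const char* branch[] = {"", "test"};  // the script node, then a function in it
    const ProfileNode* inode = GetSimpleBranch(profile, branch, arraysize(branch));
    CHECK_EQ(0, inode->deopt_infos().size());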
@@ -577,23 +596,18 @@ TEST(CollectCpuProfile) {
names[2] = v8_str("start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode =
- GetChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* fooNode =
- GetChild(env->GetIsolate(), startNode, "foo");
+ const v8::CpuProfileNode* fooNode = GetChild(startNode, "foo");
CHECK_EQ(3, fooNode->GetChildrenCount());
const char* barBranch[] = { "bar", "delay", "loop" };
- CheckSimpleBranch(env->GetIsolate(), fooNode, barBranch,
- arraysize(barBranch));
+ CheckSimpleBranch(fooNode, barBranch, arraysize(barBranch));
const char* bazBranch[] = { "baz", "delay", "loop" };
- CheckSimpleBranch(env->GetIsolate(), fooNode, bazBranch,
- arraysize(bazBranch));
+ CheckSimpleBranch(fooNode, bazBranch, arraysize(bazBranch));
const char* delayBranch[] = { "delay", "loop" };
- CheckSimpleBranch(env->GetIsolate(), fooNode, delayBranch,
- arraysize(delayBranch));
+ CheckSimpleBranch(fooNode, delayBranch, arraysize(delayBranch));
profile->Delete();
}
@@ -650,11 +664,10 @@ TEST(HotDeoptNoFrameEntry) {
names[2] = v8_str("start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode =
- GetChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
- GetChild(env->GetIsolate(), startNode, "foo");
+ GetChild(startNode, "foo");
profile->Delete();
}
@@ -736,17 +749,15 @@ TEST(SampleWhenFrameIsNotSetup) {
names[2] = v8_str("start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode =
- FindChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* startNode = FindChild(root, "start");
  // On slow machines there may be no meaningful samples at all; skip the
  // check there.
if (startNode && startNode->GetChildrenCount() > 0) {
CHECK_EQ(1, startNode->GetChildrenCount());
- const v8::CpuProfileNode* delayNode =
- GetChild(env->GetIsolate(), startNode, "delay");
+ const v8::CpuProfileNode* delayNode = GetChild(startNode, "delay");
if (delayNode->GetChildrenCount() > 0) {
CHECK_EQ(1, delayNode->GetChildrenCount());
- GetChild(env->GetIsolate(), delayNode, "loop");
+ GetChild(delayNode, "loop");
}
}
@@ -842,10 +853,9 @@ TEST(NativeAccessorUninitializedIC) {
RunProfiler(env.local(), function, args, arraysize(args), 180);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode =
- GetChild(isolate, root, "start");
- GetChild(isolate, startNode, "get foo");
- GetChild(isolate, startNode, "set foo");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ GetChild(startNode, "get foo");
+ GetChild(startNode, "set foo");
profile->Delete();
}
@@ -894,10 +904,9 @@ TEST(NativeAccessorMonomorphicIC) {
RunProfiler(env.local(), function, args, arraysize(args), 200);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode =
- GetChild(isolate, root, "start");
- GetChild(isolate, startNode, "get foo");
- GetChild(isolate, startNode, "set foo");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ GetChild(startNode, "get foo");
+ GetChild(startNode, "set foo");
profile->Delete();
}
@@ -944,9 +953,8 @@ TEST(NativeMethodUninitializedIC) {
RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* startNode =
- GetChild(isolate, root, "start");
- GetChild(isolate, startNode, "fooMethod");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ GetChild(startNode, "fooMethod");
profile->Delete();
}
@@ -997,10 +1005,9 @@ TEST(NativeMethodMonomorphicIC) {
RunProfiler(env.local(), function, args, arraysize(args), 100);
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- GetChild(isolate, root, "start");
- const v8::CpuProfileNode* startNode =
- GetChild(isolate, root, "start");
- GetChild(isolate, startNode, "fooMethod");
+ GetChild(root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ GetChild(startNode, "fooMethod");
profile->Delete();
}
@@ -1034,9 +1041,8 @@ TEST(BoundFunctionCall) {
// Don't allow |foo| node to be at the top level.
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode =
- GetChild(env->GetIsolate(), root, "start");
- GetChild(env->GetIsolate(), startNode, "foo");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
+ GetChild(startNode, "foo");
profile->Delete();
}
@@ -1086,10 +1092,10 @@ TEST(TickLines) {
CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate->heap());
profiles->StartProfiling("", false);
ProfileGenerator generator(profiles);
- ProfilerEventsProcessor* processor = new ProfilerEventsProcessor(
- &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100));
+ SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
+ &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
processor->Start();
- CpuProfiler profiler(isolate, profiles, &generator, processor);
+ CpuProfiler profiler(isolate, profiles, &generator, processor.get());
// Enqueue code creation events.
i::Handle<i::String> str = factory->NewStringFromAsciiChecked(func_name);
@@ -1099,7 +1105,7 @@ TEST(TickLines) {
*str, line, column);
// Enqueue a tick event to enable code events processing.
- EnqueueTickSampleEvent(processor, code_address);
+ EnqueueTickSampleEvent(processor.get(), code_address);
processor->StopSynchronously();
@@ -1197,8 +1203,7 @@ TEST(FunctionCallSample) {
// won't be |start| node in the profiles.
bool is_gc_stress_testing =
(i::FLAG_gc_interval != -1) || i::FLAG_stress_compaction;
- const v8::CpuProfileNode* startNode =
- FindChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* startNode = FindChild(root, "start");
CHECK(is_gc_stress_testing || startNode);
if (startNode) {
ScopedVector<v8::Handle<v8::String> > names(2);
@@ -1207,8 +1212,8 @@ TEST(FunctionCallSample) {
CheckChildrenNames(startNode, names);
}
- const v8::CpuProfileNode* unresolvedNode = FindChild(
- env->GetIsolate(), root, i::ProfileGenerator::kUnresolvedFunctionName);
+ const v8::CpuProfileNode* unresolvedNode =
+ FindChild(root, i::ProfileGenerator::kUnresolvedFunctionName);
if (unresolvedNode) {
ScopedVector<v8::Handle<v8::String> > names(1);
names[0] = v8_str("call");
@@ -1270,8 +1275,7 @@ TEST(FunctionApplySample) {
CheckChildrenNames(root, names);
}
- const v8::CpuProfileNode* startNode =
- FindChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* startNode = FindChild(root, "start");
if (startNode) {
{
ScopedVector<v8::Handle<v8::String> > names(2);
@@ -1280,8 +1284,7 @@ TEST(FunctionApplySample) {
CheckChildrenNames(startNode, names);
}
- const v8::CpuProfileNode* testNode =
- FindChild(env->GetIsolate(), startNode, "test");
+ const v8::CpuProfileNode* testNode = FindChild(startNode, "test");
if (testNode) {
ScopedVector<v8::Handle<v8::String> > names(3);
names[0] = v8_str("bar");
@@ -1293,12 +1296,11 @@ TEST(FunctionApplySample) {
}
if (const v8::CpuProfileNode* unresolvedNode =
- FindChild(env->GetIsolate(), startNode,
- ProfileGenerator::kUnresolvedFunctionName)) {
+ FindChild(startNode, ProfileGenerator::kUnresolvedFunctionName)) {
ScopedVector<v8::Handle<v8::String> > names(1);
names[0] = v8_str("apply");
CheckChildrenNames(unresolvedNode, names);
- GetChild(env->GetIsolate(), unresolvedNode, "apply");
+ GetChild(unresolvedNode, "apply");
}
}
@@ -1354,10 +1356,9 @@ TEST(CpuProfileDeepStack) {
CheckChildrenNames(root, names);
}
- const v8::CpuProfileNode* node =
- GetChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* node = GetChild(root, "start");
for (int i = 0; i < 250; ++i) {
- node = GetChild(env->GetIsolate(), node, "foo");
+ node = GetChild(node, "foo");
}
  // TODO(alph):
  // In theory there must be one more 'foo' node and a 'startProfiling' node,
@@ -1419,18 +1420,16 @@ TEST(JsNativeJsSample) {
CheckChildrenNames(root, names);
}
- const v8::CpuProfileNode* startNode =
- GetChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeFunctionNode =
- GetChild(env->GetIsolate(), startNode, "CallJsFunction");
+ GetChild(startNode, "CallJsFunction");
CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
- const v8::CpuProfileNode* barNode =
- GetChild(env->GetIsolate(), nativeFunctionNode, "bar");
+ const v8::CpuProfileNode* barNode = GetChild(nativeFunctionNode, "bar");
CHECK_EQ(1, barNode->GetChildrenCount());
- GetChild(env->GetIsolate(), barNode, "foo");
+ GetChild(barNode, "foo");
profile->Delete();
}
@@ -1481,22 +1480,20 @@ TEST(JsNativeJsRuntimeJsSample) {
names[2] = v8_str("start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode =
- GetChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeFunctionNode =
- GetChild(env->GetIsolate(), startNode, "CallJsFunction");
+ GetChild(startNode, "CallJsFunction");
CHECK_EQ(1, nativeFunctionNode->GetChildrenCount());
- const v8::CpuProfileNode* barNode =
- GetChild(env->GetIsolate(), nativeFunctionNode, "bar");
+ const v8::CpuProfileNode* barNode = GetChild(nativeFunctionNode, "bar");
  // The child is in fact a bound foo.
  // A bound function has a wrapper that may make calls to
  // other functions, e.g. "get length".
CHECK_LE(1, barNode->GetChildrenCount());
CHECK_GE(2, barNode->GetChildrenCount());
- GetChild(env->GetIsolate(), barNode, "foo");
+ GetChild(barNode, "foo");
profile->Delete();
}
@@ -1560,22 +1557,19 @@ TEST(JsNative1JsNative2JsSample) {
names[2] = v8_str("start");
CheckChildrenNames(root, names);
- const v8::CpuProfileNode* startNode =
- GetChild(env->GetIsolate(), root, "start");
+ const v8::CpuProfileNode* startNode = GetChild(root, "start");
CHECK_EQ(1, startNode->GetChildrenCount());
const v8::CpuProfileNode* nativeNode1 =
- GetChild(env->GetIsolate(), startNode, "CallJsFunction1");
+ GetChild(startNode, "CallJsFunction1");
CHECK_EQ(1, nativeNode1->GetChildrenCount());
- const v8::CpuProfileNode* barNode =
- GetChild(env->GetIsolate(), nativeNode1, "bar");
+ const v8::CpuProfileNode* barNode = GetChild(nativeNode1, "bar");
CHECK_EQ(1, barNode->GetChildrenCount());
- const v8::CpuProfileNode* nativeNode2 =
- GetChild(env->GetIsolate(), barNode, "CallJsFunction2");
+ const v8::CpuProfileNode* nativeNode2 = GetChild(barNode, "CallJsFunction2");
CHECK_EQ(1, nativeNode2->GetChildrenCount());
- GetChild(env->GetIsolate(), nativeNode2, "foo");
+ GetChild(nativeNode2, "foo");
profile->Delete();
}
@@ -1620,12 +1614,12 @@ TEST(IdleTime) {
CheckChildrenNames(root, names);
const v8::CpuProfileNode* programNode =
- GetChild(env->GetIsolate(), root, ProfileGenerator::kProgramEntryName);
+ GetChild(root, ProfileGenerator::kProgramEntryName);
CHECK_EQ(0, programNode->GetChildrenCount());
CHECK_GE(programNode->GetHitCount(), 3u);
const v8::CpuProfileNode* idleNode =
- GetChild(env->GetIsolate(), root, ProfileGenerator::kIdleEntryName);
+ GetChild(root, ProfileGenerator::kIdleEntryName);
CHECK_EQ(0, idleNode->GetChildrenCount());
CHECK_GE(idleNode->GetHitCount(), 3u);
@@ -1672,16 +1666,16 @@ TEST(FunctionDetails) {
// 0 foo 18 #4 TryCatchStatement script_a:2
// 1 bar 18 #5 no reason script_a:3
const v8::CpuProfileNode* root = profile->GetTopDownRoot();
- const v8::CpuProfileNode* script = GetChild(env->GetIsolate(), root, "");
+ const v8::CpuProfileNode* script = GetChild(root, "");
CheckFunctionDetails(env->GetIsolate(), script, "", "script_b",
script_b->GetUnboundScript()->GetId(), 1, 1);
- const v8::CpuProfileNode* baz = GetChild(env->GetIsolate(), script, "baz");
+ const v8::CpuProfileNode* baz = GetChild(script, "baz");
CheckFunctionDetails(env->GetIsolate(), baz, "baz", "script_b",
script_b->GetUnboundScript()->GetId(), 3, 16);
- const v8::CpuProfileNode* foo = GetChild(env->GetIsolate(), baz, "foo");
+ const v8::CpuProfileNode* foo = GetChild(baz, "foo");
CheckFunctionDetails(env->GetIsolate(), foo, "foo", "script_a",
script_a->GetUnboundScript()->GetId(), 2, 1);
- const v8::CpuProfileNode* bar = GetChild(env->GetIsolate(), foo, "bar");
+ const v8::CpuProfileNode* bar = GetChild(foo, "bar");
CheckFunctionDetails(env->GetIsolate(), bar, "bar", "script_a",
script_a->GetUnboundScript()->GetId(), 3, 14);
}
@@ -1720,39 +1714,17 @@ TEST(DontStopOnFinishedProfileDelete) {
}
-static const char* collect_deopt_events_test_source =
- "function opt_function(left, right, depth) {\n"
- " if (depth) return opt_function(left, right, depth - 1);\n"
- "\n"
- " var k = left / 10;\n"
- " var r = 10 / right;\n"
- " return k + r;"
- "}\n"
- "\n"
- "function test(left, right) {\n"
- " return opt_function(left, right, 1);\n"
- "}\n"
- "\n"
- "startProfiling();\n"
- "\n"
- "test(10, 10);\n"
- "\n"
- "%OptimizeFunctionOnNextCall(opt_function)\n"
- "\n"
- "test(10, 10);\n"
- "\n"
- "test(undefined, 10);\n"
- "\n"
- "%OptimizeFunctionOnNextCall(opt_function)\n"
- "\n"
- "test(10, 10);\n"
- "\n"
- "test(10, 0);\n"
- "\n"
- "stopProfiling();\n"
- "\n";
+const char* GetBranchDeoptReason(i::CpuProfile* iprofile, const char* branch[],
+ int length) {
+ v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
+ const ProfileNode* iopt_function = NULL;
+ iopt_function = GetSimpleBranch(profile, branch, length);
+ CHECK_EQ(1, iopt_function->deopt_infos().size());
+ return iopt_function->deopt_infos()[0].deopt_reason;
+}
+// Deopt at the top-level function.
TEST(CollectDeoptEvents) {
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
@@ -1763,21 +1735,90 @@ TEST(CollectDeoptEvents) {
v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
- v8::Script::Compile(v8_str(collect_deopt_events_test_source))->Run();
+ const char opt_source[] =
+ "function opt_function%d(value, depth) {\n"
+ " if (depth) return opt_function%d(value, depth - 1);\n"
+ "\n"
+ " return 10 / value;\n"
+ "}\n"
+ "\n";
+
+ for (int i = 0; i < 3; ++i) {
+ i::EmbeddedVector<char, sizeof(opt_source) + 100> buffer;
+ i::SNPrintF(buffer, opt_source, i, i);
+ v8::Script::Compile(v8_str(buffer.start()))->Run();
+ }
+
+ const char* source =
+ "startProfiling();\n"
+ "\n"
+ "opt_function0(1, 1);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(opt_function0)\n"
+ "\n"
+ "opt_function0(1, 1);\n"
+ "\n"
+ "opt_function0(undefined, 1);\n"
+ "\n"
+ "opt_function1(1, 1);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(opt_function1)\n"
+ "\n"
+ "opt_function1(1, 1);\n"
+ "\n"
+ "opt_function1(NaN, 1);\n"
+ "\n"
+ "opt_function2(1, 1);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(opt_function2)\n"
+ "\n"
+ "opt_function2(1, 1);\n"
+ "\n"
+ "opt_function2(0, 1);\n"
+ "\n"
+ "stopProfiling();\n"
+ "\n";
+
+ v8::Script::Compile(v8_str(source))->Run();
i::CpuProfile* iprofile = iprofiler->GetProfile(0);
iprofile->Print();
- v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
- const char* branch[] = {"", "test", "opt_function", "opt_function"};
- const v8::CpuProfileNode* opt_function = GetSimpleBranch(
- env->GetIsolate(), profile->GetTopDownRoot(), branch, arraysize(branch));
- CHECK(opt_function);
- const i::ProfileNode* iopt_function =
- reinterpret_cast<const i::ProfileNode*>(opt_function);
- CHECK_EQ(2, iopt_function->deopt_infos().length());
- CHECK_EQ(i::Deoptimizer::GetDeoptReason(i::Deoptimizer::kNotAHeapNumber),
- iopt_function->deopt_infos()[0].deopt_reason);
- CHECK_EQ(i::Deoptimizer::GetDeoptReason(i::Deoptimizer::kDivisionByZero),
- iopt_function->deopt_infos()[1].deopt_reason);
+ /* The expected profile
+ [Top down]:
+ 0 (root) 0 #1
+ 23 32 #2
+ 1 opt_function2 31 #7
+ 1 opt_function2 31 #8
+ ;;; deopted at script_id: 31 position: 106 with reason
+ 'division by zero'.
+ 2 opt_function0 29 #3
+ 4 opt_function0 29 #4
+ ;;; deopted at script_id: 29 position: 108 with reason 'not a
+ heap number'.
+ 0 opt_function1 30 #5
+ 1 opt_function1 30 #6
+ ;;; deopted at script_id: 30 position: 108 with reason 'lost
+ precision or NaN'.
+ */
+
+ {
+ const char* branch[] = {"", "opt_function0", "opt_function0"};
+ CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber),
+ GetBranchDeoptReason(iprofile, branch, arraysize(branch)));
+ }
+ {
+ const char* branch[] = {"", "opt_function1", "opt_function1"};
+ const char* deopt_reason =
+ GetBranchDeoptReason(iprofile, branch, arraysize(branch));
+ if (deopt_reason != reason(i::Deoptimizer::kNaN) &&
+ deopt_reason != reason(i::Deoptimizer::kLostPrecisionOrNaN)) {
+ FATAL(deopt_reason);
+ }
+ }
+ {
+ const char* branch[] = {"", "opt_function2", "opt_function2"};
+ CHECK_EQ(reason(i::Deoptimizer::kDivisionByZero),
+ GetBranchDeoptReason(iprofile, branch, arraysize(branch)));
+ }
iprofiler->DeleteProfile(iprofile);
}
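
Rewriting CollectDeoptEvents around three generated clones (opt_function0 through opt_function2) pins each deopt reason to its own ProfileNode, so the assertions no longer depend on the order in which several deopt_infos accumulate on one node; GetBranchDeoptReason can simply demand exactly one entry per branch. The usage shape, schematically (names as in the test above; the doubled function name reflects its self-recursive call):

    {
      const char* branch[] = {"", "opt_function0", "opt_function0"};
      CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber),
               GetBranchDeoptReason(iprofile, branch, arraysize(branch)));
    }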
@@ -1796,3 +1837,197 @@ TEST(SourceLocation) {
v8::Script::Compile(v8_str(source))->Run();
}
+
+
+static const char* inlined_source =
+ "function opt_function(left, right) { var k = left / 10; var r = 10 / "
+ "right; return k + r; }\n";
+// 0.........1.........2.........3.........4....*....5.........6......*..7
+
+
+// Deopt at the first-level inlined function.
+TEST(DeoptAtFirstLevelInlinedSource) {
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+
+ // 0.........1.........2.........3.........4.........5.........6.........7
+ const char* source =
+ "function test(left, right) { return opt_function(left, right); }\n"
+ "\n"
+ "startProfiling();\n"
+ "\n"
+ "test(10, 10);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(test)\n"
+ "\n"
+ "test(10, 10);\n"
+ "\n"
+ "test(undefined, 10);\n"
+ "\n"
+ "stopProfiling();\n"
+ "\n";
+
+ v8::Handle<v8::Script> inlined_script = v8_compile(inlined_source);
+ inlined_script->Run();
+ int inlined_script_id = inlined_script->GetUnboundScript()->GetId();
+
+ v8::Handle<v8::Script> script = v8_compile(source);
+ script->Run();
+ int script_id = script->GetUnboundScript()->GetId();
+
+ i::CpuProfile* iprofile = iprofiler->GetProfile(0);
+ iprofile->Print();
+ /* The expected profile output
+ [Top down]:
+ 0 (root) 0 #1
+ 10 30 #2
+ 1 test 30 #3
+ ;;; deopted at script_id: 29 position: 45 with reason 'not a
+ heap number'.
+ ;;; Inline point: script_id 30 position: 36.
+ 4 opt_function 29 #4
+ */
+ v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
+
+ const char* branch[] = {"", "test"};
+ const ProfileNode* itest_node =
+ GetSimpleBranch(profile, branch, arraysize(branch));
+ const std::vector<i::DeoptInfo>& deopt_infos = itest_node->deopt_infos();
+ CHECK_EQ(1, deopt_infos.size());
+
+ const i::DeoptInfo& info = deopt_infos[0];
+ CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber), info.deopt_reason);
+ CHECK_EQ(2, info.stack.size());
+ CHECK_EQ(inlined_script_id, info.stack[0].script_id);
+ CHECK_EQ(offset(inlined_source, "left /"), info.stack[0].position);
+ CHECK_EQ(script_id, info.stack[1].script_id);
+ CHECK_EQ(offset(source, "opt_function(left,"), info.stack[1].position);
+
+ iprofiler->DeleteProfile(iprofile);
+}
+
+
+// Deopt at the second-level inlined function.
+TEST(DeoptAtSecondLevelInlinedSource) {
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+
+ // 0.........1.........2.........3.........4.........5.........6.........7
+ const char* source =
+ "function test2(left, right) { return opt_function(left, right); }\n"
+ "function test1(left, right) { return test2(left, right); }\n"
+ "\n"
+ "startProfiling();\n"
+ "\n"
+ "test1(10, 10);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(test1)\n"
+ "\n"
+ "test1(10, 10);\n"
+ "\n"
+ "test1(undefined, 10);\n"
+ "\n"
+ "stopProfiling();\n"
+ "\n";
+
+ v8::Handle<v8::Script> inlined_script = v8_compile(inlined_source);
+ inlined_script->Run();
+ int inlined_script_id = inlined_script->GetUnboundScript()->GetId();
+
+ v8::Handle<v8::Script> script = v8_compile(source);
+ script->Run();
+ int script_id = script->GetUnboundScript()->GetId();
+
+ i::CpuProfile* iprofile = iprofiler->GetProfile(0);
+ iprofile->Print();
+ /* The expected profile output
+ [Top down]:
+ 0 (root) 0 #1
+ 11 30 #2
+ 1 test1 30 #3
+ ;;; deopted at script_id: 29 position: 45 with reason 'not a
+ heap number'.
+ ;;; Inline point: script_id 30 position: 37.
+ ;;; Inline point: script_id 30 position: 103.
+ 1 test2 30 #4
+ 3 opt_function 29 #5
+ */
+
+ v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
+
+ const char* branch[] = {"", "test1"};
+ const ProfileNode* itest_node =
+ GetSimpleBranch(profile, branch, arraysize(branch));
+ const std::vector<i::DeoptInfo>& deopt_infos = itest_node->deopt_infos();
+ CHECK_EQ(1, deopt_infos.size());
+
+  const i::DeoptInfo& info = deopt_infos[0];
+ CHECK_EQ(reason(i::Deoptimizer::kNotAHeapNumber), info.deopt_reason);
+ CHECK_EQ(3, info.stack.size());
+ CHECK_EQ(inlined_script_id, info.stack[0].script_id);
+ CHECK_EQ(offset(inlined_source, "left /"), info.stack[0].position);
+ CHECK_EQ(script_id, info.stack[1].script_id);
+ CHECK_EQ(offset(source, "opt_function(left,"), info.stack[1].position);
+ CHECK_EQ(offset(source, "test2(left, right);"), info.stack[2].position);
+
+ iprofiler->DeleteProfile(iprofile);
+}
+
+
+// Deopt in an untracked function.
+TEST(DeoptUntrackedFunction) {
+ if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ i::FLAG_allow_natives_syntax = true;
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+ v8::Context::Scope context_scope(env);
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
+ i::CpuProfiler* iprofiler = reinterpret_cast<i::CpuProfiler*>(profiler);
+
+ // 0.........1.........2.........3.........4.........5.........6.........7
+ const char* source =
+ "function test(left, right) { return opt_function(left, right); }\n"
+ "\n"
+ "test(10, 10);\n"
+ "\n"
+ "%OptimizeFunctionOnNextCall(test)\n"
+ "\n"
+ "test(10, 10);\n"
+ "\n"
+ "startProfiling();\n" // profiler started after compilation.
+ "\n"
+ "test(undefined, 10);\n"
+ "\n"
+ "stopProfiling();\n"
+ "\n";
+
+ v8::Handle<v8::Script> inlined_script = v8_compile(inlined_source);
+ inlined_script->Run();
+
+ v8::Handle<v8::Script> script = v8_compile(source);
+ script->Run();
+
+ i::CpuProfile* iprofile = iprofiler->GetProfile(0);
+ iprofile->Print();
+ v8::CpuProfile* profile = reinterpret_cast<v8::CpuProfile*>(iprofile);
+
+ const char* branch[] = {"", "test"};
+ const ProfileNode* itest_node =
+ GetSimpleBranch(profile, branch, arraysize(branch));
+ CHECK_EQ(0, itest_node->deopt_infos().size());
+
+ iprofiler->DeleteProfile(iprofile);
+}
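
Source positions in the new deopt tests are computed with the strstr-based offset() helper rather than hard-coded integers, so the expectations survive any reflowing of the test sources. The idiom in isolation, as a standalone sketch (not part of the patch):

    #include <assert.h>
    #include <string.h>

    // Offset of the first occurrence of `substring` in `src`. The caller must
    // guarantee the substring is present: a NULL from strstr would make the
    // pointer subtraction undefined.
    static int offset(const char* src, const char* substring) {
      return static_cast<int>(strstr(src, substring) - src);
    }

    int main() {
      const char* source = "function f(left, right) { return left / right; }";
      assert(offset(source, "left /") == 33);  // position of the division
      return 0;
    }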
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index 2f722c2baf..2bcc625a95 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -28,7 +28,6 @@
#include "src/v8.h"
#include "src/global-handles.h"
-#include "src/snapshot.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index a4a993ad30..5929379663 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -416,7 +416,7 @@ void CheckDebuggerUnloaded(bool check_functions) {
if (RelocInfo::IsCodeTarget(rmode)) {
CHECK(!Debug::IsDebugBreak(it.rinfo()->target_address()));
} else if (RelocInfo::IsJSReturn(rmode)) {
- CHECK(!Debug::IsDebugBreakAtReturn(it.rinfo()));
+ CHECK(!it.rinfo()->IsPatchedReturnSequence());
}
}
}
@@ -437,47 +437,36 @@ static void CheckDebuggerUnloaded(bool check_functions = false) {
}
-// Inherit from BreakLocationIterator to get access to protected parts for
-// testing.
-class TestBreakLocationIterator: public v8::internal::BreakLocationIterator {
- public:
- explicit TestBreakLocationIterator(Handle<v8::internal::DebugInfo> debug_info)
- : BreakLocationIterator(debug_info, v8::internal::SOURCE_BREAK_LOCATIONS) {}
- v8::internal::RelocIterator* it() { return reloc_iterator_; }
- v8::internal::RelocIterator* it_original() {
- return reloc_iterator_original_;
- }
-};
-
-
// Compile a function, set a break point and check that the call at the break
// location in the code is the expected debug_break function.
void CheckDebugBreakFunction(DebugLocalContext* env,
const char* source, const char* name,
int position, v8::internal::RelocInfo::Mode mode,
Code* debug_break) {
- v8::internal::Debug* debug = CcTest::i_isolate()->debug();
+ i::Debug* debug = CcTest::i_isolate()->debug();
// Create function and set the break point.
- Handle<v8::internal::JSFunction> fun = v8::Utils::OpenHandle(
- *CompileFunction(env, source, name));
+ Handle<i::JSFunction> fun =
+ v8::Utils::OpenHandle(*CompileFunction(env, source, name));
int bp = SetBreakPoint(fun, position);
// Check that the debug break function is as expected.
- Handle<v8::internal::SharedFunctionInfo> shared(fun->shared());
+ Handle<i::SharedFunctionInfo> shared(fun->shared());
CHECK(Debug::HasDebugInfo(shared));
- TestBreakLocationIterator it1(Debug::GetDebugInfo(shared));
- it1.FindBreakLocationFromPosition(position, v8::internal::STATEMENT_ALIGNED);
- v8::internal::RelocInfo::Mode actual_mode = it1.it()->rinfo()->rmode();
- if (actual_mode == v8::internal::RelocInfo::CODE_TARGET_WITH_ID) {
- actual_mode = v8::internal::RelocInfo::CODE_TARGET;
+ i::BreakLocation location = i::BreakLocation::FromPosition(
+ Debug::GetDebugInfo(shared), i::SOURCE_BREAK_LOCATIONS, position,
+ i::STATEMENT_ALIGNED);
+ i::RelocInfo::Mode actual_mode = location.rmode();
+ if (actual_mode == i::RelocInfo::CODE_TARGET_WITH_ID) {
+ actual_mode = i::RelocInfo::CODE_TARGET;
}
CHECK_EQ(mode, actual_mode);
- if (mode != v8::internal::RelocInfo::JS_RETURN) {
- CHECK_EQ(debug_break,
- Code::GetCodeFromTargetAddress(it1.it()->rinfo()->target_address()));
+ if (mode != i::RelocInfo::JS_RETURN) {
+ CHECK_EQ(debug_break, *location.CodeTarget());
} else {
- CHECK(Debug::IsDebugBreakAtReturn(it1.it()->rinfo()));
+ i::RelocInfo rinfo = location.rinfo();
+ CHECK(i::RelocInfo::IsJSReturn(rinfo.rmode()));
+ CHECK(rinfo.IsPatchedReturnSequence());
}
// Clear the break point and check that the debug break function is no longer
@@ -485,15 +474,17 @@ void CheckDebugBreakFunction(DebugLocalContext* env,
ClearBreakPoint(bp);
CHECK(!debug->HasDebugInfo(shared));
CHECK(debug->EnsureDebugInfo(shared, fun));
- TestBreakLocationIterator it2(Debug::GetDebugInfo(shared));
- it2.FindBreakLocationFromPosition(position, v8::internal::STATEMENT_ALIGNED);
- actual_mode = it2.it()->rinfo()->rmode();
- if (actual_mode == v8::internal::RelocInfo::CODE_TARGET_WITH_ID) {
- actual_mode = v8::internal::RelocInfo::CODE_TARGET;
+ location = i::BreakLocation::FromPosition(Debug::GetDebugInfo(shared),
+ i::SOURCE_BREAK_LOCATIONS, position,
+ i::STATEMENT_ALIGNED);
+ actual_mode = location.rmode();
+ if (actual_mode == i::RelocInfo::CODE_TARGET_WITH_ID) {
+ actual_mode = i::RelocInfo::CODE_TARGET;
}
CHECK_EQ(mode, actual_mode);
- if (mode == v8::internal::RelocInfo::JS_RETURN) {
- CHECK(!Debug::IsDebugBreakAtReturn(it2.it()->rinfo()));
+ if (mode == i::RelocInfo::JS_RETURN) {
+ i::RelocInfo rinfo = location.rinfo();
+ CHECK(!rinfo.IsPatchedReturnSequence());
}
}
@@ -6706,6 +6697,7 @@ TEST(ProcessDebugMessagesThreaded) {
v8::FunctionTemplate::New(isolate, StartSendingCommands);
env->Global()->Set(v8_str("start"), start->GetFunction());
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("start(); while (true) { }");
CHECK_EQ(20, counting_message_handler_counter);
@@ -7655,7 +7647,6 @@ static void DebugHarmonyScopingListener(
TEST(DebugBreakInLexicalScopes) {
- i::FLAG_harmony_scoping = true;
i::FLAG_allow_natives_syntax = true;
DebugLocalContext env;
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 5d487bb7da..f3dc777102 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -637,7 +637,6 @@ TEST(CrossScriptReferences) {
TEST(CrossScriptReferences_Simple) {
- i::FLAG_harmony_scoping = true;
i::FLAG_use_strict = true;
v8::Isolate* isolate = CcTest::isolate();
@@ -652,7 +651,6 @@ TEST(CrossScriptReferences_Simple) {
TEST(CrossScriptReferences_Simple2) {
- i::FLAG_harmony_scoping = true;
i::FLAG_use_strict = true;
v8::Isolate* isolate = CcTest::isolate();
@@ -675,8 +673,6 @@ TEST(CrossScriptReferences_Simple2) {
TEST(CrossScriptReferencesHarmony) {
- i::FLAG_harmony_scoping = true;
-
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
@@ -818,9 +814,27 @@ TEST(CrossScriptReferencesHarmony) {
}
+TEST(CrossScriptReferencesHarmonyRegress) {
+ v8::Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ SimpleContext context;
+ context.Check(
+ "'use strict';"
+ "function i1() { "
+ " let y = 10; return (typeof x2 === 'undefined' ? 0 : 2) + y"
+ "}"
+ "i1();"
+ "i1();",
+ EXPECT_RESULT, Number::New(isolate, 10));
+ context.Check(
+ "'use strict';"
+ "let x2 = 2; i1();",
+ EXPECT_RESULT, Number::New(isolate, 12));
+}
+
+
TEST(GlobalLexicalOSR) {
i::FLAG_use_strict = true;
- i::FLAG_harmony_scoping = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
@@ -844,7 +858,6 @@ TEST(GlobalLexicalOSR) {
TEST(CrossScriptConflicts) {
i::FLAG_use_strict = true;
- i::FLAG_harmony_scoping = true;
HandleScope scope(CcTest::isolate());
@@ -880,8 +893,6 @@ TEST(CrossScriptConflicts) {
TEST(CrossScriptDynamicLookup) {
- i::FLAG_harmony_scoping = true;
-
HandleScope handle_scope(CcTest::isolate());
{
@@ -913,8 +924,6 @@ TEST(CrossScriptDynamicLookup) {
TEST(CrossScriptGlobal) {
- i::FLAG_harmony_scoping = true;
-
HandleScope handle_scope(CcTest::isolate());
{
SimpleContext context;
@@ -957,8 +966,6 @@ TEST(CrossScriptGlobal) {
TEST(CrossScriptStaticLookupUndeclared) {
- i::FLAG_harmony_scoping = true;
-
HandleScope handle_scope(CcTest::isolate());
{
@@ -991,7 +998,6 @@ TEST(CrossScriptStaticLookupUndeclared) {
TEST(CrossScriptLoadICs) {
- i::FLAG_harmony_scoping = true;
i::FLAG_allow_natives_syntax = true;
HandleScope handle_scope(CcTest::isolate());
@@ -1047,7 +1053,6 @@ TEST(CrossScriptLoadICs) {
TEST(CrossScriptStoreICs) {
- i::FLAG_harmony_scoping = true;
i::FLAG_allow_natives_syntax = true;
HandleScope handle_scope(CcTest::isolate());
@@ -1125,7 +1130,6 @@ TEST(CrossScriptStoreICs) {
TEST(CrossScriptAssignmentToConst) {
- i::FLAG_harmony_scoping = true;
i::FLAG_allow_natives_syntax = true;
HandleScope handle_scope(CcTest::isolate());
@@ -1148,7 +1152,6 @@ TEST(CrossScriptAssignmentToConst) {
TEST(Regress425510) {
- i::FLAG_harmony_scoping = true;
i::FLAG_allow_natives_syntax = true;
HandleScope handle_scope(CcTest::isolate());
@@ -1163,3 +1166,85 @@ TEST(Regress425510) {
}
}
}
+
+
+TEST(Regress3941) {
+ i::FLAG_allow_natives_syntax = true;
+
+ HandleScope handle_scope(CcTest::isolate());
+
+ {
+ SimpleContext context;
+ context.Check("function f() { x = 1; }", EXPECT_RESULT,
+ Undefined(CcTest::isolate()));
+ context.Check("'use strict'; f(); let x = 2; x", EXPECT_EXCEPTION);
+ }
+
+
+ {
+ // Train ICs.
+ SimpleContext context;
+ context.Check("function f() { x = 1; }", EXPECT_RESULT,
+ Undefined(CcTest::isolate()));
+ for (int i = 0; i < 4; i++) {
+ context.Check("f(); x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
+ }
+ context.Check("'use strict'; f(); let x = 2; x", EXPECT_EXCEPTION);
+ }
+
+
+ {
+ // Optimize.
+ SimpleContext context;
+ context.Check("function f() { x = 1; }", EXPECT_RESULT,
+ Undefined(CcTest::isolate()));
+ for (int i = 0; i < 4; i++) {
+ context.Check("f(); x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
+ }
+ context.Check("%OptimizeFunctionOnNextCall(f); f(); x", EXPECT_RESULT,
+ Number::New(CcTest::isolate(), 1));
+
+ context.Check("'use strict'; f(); let x = 2; x", EXPECT_EXCEPTION);
+ }
+}
+
+
+TEST(Regress3941_Reads) {
+ i::FLAG_allow_natives_syntax = true;
+
+ HandleScope handle_scope(CcTest::isolate());
+
+ {
+ SimpleContext context;
+ context.Check("function f() { return x; }", EXPECT_RESULT,
+ Undefined(CcTest::isolate()));
+ context.Check("'use strict'; f(); let x = 2; x", EXPECT_EXCEPTION);
+ }
+
+
+ {
+ // Train ICs.
+ SimpleContext context;
+ context.Check("function f() { return x; }", EXPECT_RESULT,
+ Undefined(CcTest::isolate()));
+ for (int i = 0; i < 4; i++) {
+ context.Check("f()", EXPECT_EXCEPTION);
+ }
+ context.Check("'use strict'; f(); let x = 2; x", EXPECT_EXCEPTION);
+ }
+
+
+ {
+ // Optimize.
+ SimpleContext context;
+ context.Check("function f() { return x; }", EXPECT_RESULT,
+ Undefined(CcTest::isolate()));
+ for (int i = 0; i < 4; i++) {
+ context.Check("f()", EXPECT_EXCEPTION);
+ }
+ context.Check("%OptimizeFunctionOnNextCall(f);", EXPECT_RESULT,
+ Undefined(CcTest::isolate()));
+
+ context.Check("'use strict'; f(); let x = 2; x", EXPECT_EXCEPTION);
+ }
+}
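
Both Regress3941 tests walk the same three stages (cold, after IC training, and after Crankshaft optimization) to check that a sloppy-mode global access keeps throwing once a conflicting lexical binding appears, no matter which IC or optimized-code path is hot. Condensed, the final stage of the store variant looks like this (a sketch reusing this file's SimpleContext fixture):

    // Sketch of the shared shape; the store variant is shown.
    SimpleContext context;
    context.Check("function f() { x = 1; }", EXPECT_RESULT,
                  Undefined(CcTest::isolate()));
    for (int i = 0; i < 4; i++) {  // train the global store IC
      context.Check("f(); x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
    }
    context.Check("%OptimizeFunctionOnNextCall(f); f(); x", EXPECT_RESULT,
                  Number::New(CcTest::isolate(), 1));
    // Introducing a lexical x must turn the trained access into a throw.
    context.Check("'use strict'; f(); let x = 2; x", EXPECT_EXCEPTION);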
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 095c63679c..502b641df6 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -34,7 +34,6 @@
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index a2eaa15ed3..ca4a4f2868 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -34,7 +34,6 @@
#include "src/disassembler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -442,6 +441,10 @@ TEST(DisasmIa320) {
__ subsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ divsd(xmm1, xmm0);
__ divsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ minsd(xmm1, xmm0);
+ __ minsd(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ maxsd(xmm1, xmm0);
+ __ maxsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ ucomisd(xmm0, xmm1);
__ cmpltsd(xmm0, xmm1);
@@ -451,6 +454,11 @@ TEST(DisasmIa320) {
__ psrlq(xmm0, 17);
__ psrlq(xmm0, xmm1);
__ por(xmm0, xmm1);
+
+ __ pcmpeqd(xmm1, xmm0);
+
+ __ punpckldq(xmm1, xmm6);
+ __ punpckhdq(xmm7, xmm5);
}
// cmov.
@@ -494,6 +502,10 @@ TEST(DisasmIa320) {
__ vsubsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vdivsd(xmm0, xmm1, xmm2);
__ vdivsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vminsd(xmm0, xmm1, xmm2);
+ __ vminsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vmaxsd(xmm0, xmm1, xmm2);
+ __ vmaxsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
}
}
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 131f41384c..ca928a61eb 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -34,7 +34,6 @@
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index d682d33480..a2a93c611a 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -34,7 +34,6 @@
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-disasm-ppc.cc b/deps/v8/test/cctest/test-disasm-ppc.cc
index 87b9ade055..ed409f2f9d 100644
--- a/deps/v8/test/cctest/test-disasm-ppc.cc
+++ b/deps/v8/test/cctest/test-disasm-ppc.cc
@@ -34,7 +34,6 @@
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 6cd58ec209..cdedc8be55 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -34,7 +34,6 @@
#include "src/disassembler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -89,6 +88,9 @@ TEST(DisasmX64) {
__ addq(rdi, Operand(rbp, rcx, times_4, -3999));
__ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
+ __ bsrl(rax, r15);
+ __ bsrl(r9, Operand(rcx, times_8, 91919));
+
__ nop();
__ addq(rbx, Immediate(12));
__ nop();
@@ -436,6 +438,10 @@ TEST(DisasmX64) {
__ subsd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ divsd(xmm1, xmm0);
__ divsd(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ minsd(xmm1, xmm0);
+ __ minsd(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ maxsd(xmm1, xmm0);
+ __ maxsd(xmm1, Operand(rbx, rcx, times_4, 10000));
__ ucomisd(xmm0, xmm1);
__ andpd(xmm0, xmm1);
@@ -446,6 +452,9 @@ TEST(DisasmX64) {
__ psrlq(xmm0, 6);
__ pcmpeqd(xmm1, xmm0);
+
+ __ punpckldq(xmm1, xmm11);
+ __ punpckhdq(xmm8, xmm15);
}
// cmov.
@@ -472,6 +481,10 @@ TEST(DisasmX64) {
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope(&assm, SSE4_1);
__ extractps(rax, xmm1, 0);
+ __ pextrd(rbx, xmm15, 0);
+ __ pextrd(r12, xmm0, 1);
+ __ pinsrd(xmm9, r9, 0);
+ __ pinsrd(xmm5, rax, 1);
}
}
@@ -486,7 +499,11 @@ TEST(DisasmX64) {
__ vsubsd(xmm0, xmm1, xmm2);
__ vsubsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
__ vdivsd(xmm0, xmm1, xmm2);
- __ vdivsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ vdivsd(xmm0, xmm1, Operand(rbx, rcx, times_2, 10000));
+ __ vminsd(xmm8, xmm1, xmm2);
+ __ vminsd(xmm9, xmm1, Operand(rbx, rcx, times_8, 10000));
+ __ vmaxsd(xmm8, xmm1, xmm2);
+ __ vmaxsd(xmm9, xmm1, Operand(rbx, rcx, times_1, 10000));
}
}
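
The disassembler updates across ia32 and x64 follow one pattern: newly supported instructions (minsd/maxsd, pcmpeqd, punpckldq/punpckhdq, pextrd/pinsrd, bsrl, and the AVX vminsd/vmaxsd) are emitted into the test's Assembler, with the feature-gated ones wrapped in a CpuFeatureScope so the test still passes on hosts without the extension. Schematically (a sketch; `assm` is the Assembler the surrounding test builds):

    if (CpuFeatures::IsSupported(AVX)) {
      CpuFeatureScope scope(&assm, AVX);
      // Cover both the register and the memory operand encodings.
      __ vminsd(xmm0, xmm1, xmm2);
      __ vmaxsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
    }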
diff --git a/deps/v8/test/cctest/test-disasm-x87.cc b/deps/v8/test/cctest/test-disasm-x87.cc
index e9b0dc5474..a3433b290b 100644
--- a/deps/v8/test/cctest/test-disasm-x87.cc
+++ b/deps/v8/test/cctest/test-disasm-x87.cc
@@ -34,7 +34,6 @@
#include "src/disassembler.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index 89c475eab5..f53dfde10e 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -22,10 +22,11 @@ TEST(VectorStructure) {
v8::HandleScope scope(context->GetIsolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
+ Zone* zone = isolate->runtime_zone();
// Empty vectors are the empty fixed array.
FeedbackVectorSpec empty;
- Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(empty);
+ Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(&empty);
CHECK(Handle<FixedArray>::cast(vector)
.is_identical_to(factory->empty_fixed_array()));
// Which can nonetheless be queried.
@@ -34,24 +35,21 @@ TEST(VectorStructure) {
CHECK_EQ(0, vector->Slots());
CHECK_EQ(0, vector->ICSlots());
- FeedbackVectorSpec one_slot(1, 0);
- vector = factory->NewTypeFeedbackVector(one_slot);
+ FeedbackVectorSpec one_slot(1);
+ vector = factory->NewTypeFeedbackVector(&one_slot);
CHECK_EQ(1, vector->Slots());
CHECK_EQ(0, vector->ICSlots());
- FeedbackVectorSpec one_icslot(0, 1);
- if (FLAG_vector_ics) {
- one_icslot.SetKind(0, Code::CALL_IC);
- }
- vector = factory->NewTypeFeedbackVector(one_icslot);
+ FeedbackVectorSpec one_icslot(0, Code::CALL_IC);
+ vector = factory->NewTypeFeedbackVector(&one_icslot);
CHECK_EQ(0, vector->Slots());
CHECK_EQ(1, vector->ICSlots());
- FeedbackVectorSpec spec(3, 5);
+ ZoneFeedbackVectorSpec spec(zone, 3, 5);
if (FLAG_vector_ics) {
for (int i = 0; i < 5; i++) spec.SetKind(i, Code::CALL_IC);
}
- vector = factory->NewTypeFeedbackVector(spec);
+ vector = factory->NewTypeFeedbackVector(&spec);
CHECK_EQ(3, vector->Slots());
CHECK_EQ(5, vector->ICSlots());
@@ -71,8 +69,8 @@ TEST(VectorStructure) {
CHECK_EQ(index,
TypeFeedbackVector::kReservedIndexCount + metadata_length + 3);
CHECK(FeedbackVectorICSlot(0) == vector->ToICSlot(index));
-
- CHECK_EQ(TypeFeedbackVector::kReservedIndexCount + metadata_length + 3 + 5,
+ CHECK_EQ(TypeFeedbackVector::kReservedIndexCount + metadata_length + 3 +
+ 5 * TypeFeedbackVector::elements_per_ic_slot(),
vector->length());
}
@@ -88,8 +86,9 @@ TEST(VectorICMetadata) {
}
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
+ Zone* zone = isolate->runtime_zone();
- FeedbackVectorSpec spec(10, 3 * 10);
+ ZoneFeedbackVectorSpec spec(zone, 10, 3 * 10);
// Set metadata.
for (int i = 0; i < 30; i++) {
Code::Kind kind;
@@ -103,7 +102,7 @@ TEST(VectorICMetadata) {
spec.SetKind(i, kind);
}
- Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(spec);
+ Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(&spec);
CHECK_EQ(10, vector->Slots());
CHECK_EQ(3 * 10, vector->ICSlots());
@@ -136,8 +135,8 @@ TEST(VectorSlotClearing) {
// We only test clearing FeedbackVectorSlots, not FeedbackVectorICSlots.
// The reason is that FeedbackVectorICSlots need a full code environment
// to fully test (See VectorICProfilerStatistics test below).
- FeedbackVectorSpec spec(5, 0);
- Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(spec);
+ FeedbackVectorSpec spec(5);
+ Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(&spec);
// Fill with information
vector->Set(FeedbackVectorSlot(0), Smi::FromInt(1));
@@ -307,6 +306,34 @@ TEST(VectorLoadICStates) {
}
+TEST(VectorLoadICSlotSharing) {
+ if (i::FLAG_always_opt || !i::FLAG_vector_ics) return;
+ CcTest::InitializeVM();
+ LocalContext context;
+ v8::HandleScope scope(context->GetIsolate());
+ Isolate* isolate = CcTest::i_isolate();
+
+  // Function f has 3 LoadICs, one for each load of o, but they all share
+  // the same feedback vector IC slot.
+ CompileRun(
+ "var o = 10;"
+ "function f() {"
+ " var x = o + 10;"
+ " return o + x + o;"
+ "}"
+ "f();");
+ Handle<JSFunction> f = v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
+ // There should be one IC slot.
+ Handle<TypeFeedbackVector> feedback_vector =
+ Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+ CHECK_EQ(1, feedback_vector->ICSlots());
+ FeedbackVectorICSlot slot(0);
+ LoadICNexus nexus(feedback_vector, slot);
+ CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+}
+
+
TEST(VectorLoadICOnSmi) {
if (i::FLAG_always_opt || !i::FLAG_vector_ics) return;
CcTest::InitializeVM();
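
The feedback-vector changes swap value-passing for pointer-passing and split the spec types: a plain FeedbackVectorSpec now covers the zero-or-one-IC-slot cases directly, while ZoneFeedbackVectorSpec records a kind per IC slot and therefore needs a Zone. The updated call shapes side by side (a sketch using the names from the hunks above):

    // One plain slot, no IC slots.
    FeedbackVectorSpec one_slot(1);
    Handle<TypeFeedbackVector> a = factory->NewTypeFeedbackVector(&one_slot);

    // One IC slot whose kind is fixed at construction.
    FeedbackVectorSpec one_icslot(0, Code::CALL_IC);
    Handle<TypeFeedbackVector> b = factory->NewTypeFeedbackVector(&one_icslot);

    // Many IC slots: kinds are recorded per slot, so the spec lives in a Zone.
    ZoneFeedbackVectorSpec spec(isolate->runtime_zone(), 3, 5);
    for (int i = 0; i < 5; i++) spec.SetKind(i, Code::CALL_IC);
    Handle<TypeFeedbackVector> c = factory->NewTypeFeedbackVector(&spec);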
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 24f9c73532..ae8e77d745 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -81,11 +81,9 @@ static void CheckFunctionName(v8::Handle<v8::Script> script,
// Obtain SharedFunctionInfo for the function.
isolate->debug()->PrepareForBreakPoints();
- Object* shared_func_info_ptr =
- isolate->debug()->FindSharedFunctionInfoInScript(i_script, func_pos);
- CHECK(shared_func_info_ptr != CcTest::heap()->undefined_value());
- Handle<SharedFunctionInfo> shared_func_info(
- SharedFunctionInfo::cast(shared_func_info_ptr));
+ Handle<SharedFunctionInfo> shared_func_info =
+ Handle<SharedFunctionInfo>::cast(
+ isolate->debug()->FindSharedFunctionInfoInScript(i_script, func_pos));
// Verify inferred function name.
SmartArrayPointer<char> inferred_name =
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 5c9d2e69f0..d00532f48d 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -36,7 +36,6 @@
#include "src/debug.h"
#include "src/hashmap.h"
#include "src/heap-profiler.h"
-#include "src/snapshot.h"
#include "src/utils-inl.h"
#include "test/cctest/cctest.h"
@@ -183,8 +182,7 @@ TEST(HeapSnapshot) {
"var a2 = new A2();\n"
"var b2_1 = new B2(a2), b2_2 = new B2(a2);\n"
"var c2 = new C2(a2);");
- const v8::HeapSnapshot* snapshot_env2 =
- heap_profiler->TakeHeapSnapshot(v8_str("env2"));
+ const v8::HeapSnapshot* snapshot_env2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot_env2));
const v8::HeapGraphNode* global_env2 = GetGlobalObject(snapshot_env2);
@@ -217,8 +215,7 @@ TEST(HeapSnapshotObjectSizes) {
"x = new X(new X(), new X());\n"
"dummy = new X();\n"
"(function() { x.a.a = x.b; })();");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("sizes"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* x =
@@ -246,8 +243,7 @@ TEST(BoundFunctionInSnapshot) {
"function myFunction(a, b) { this.a = a; this.b = b; }\n"
"function AAAAA() {}\n"
"boundFunction = myFunction.bind(new AAAAA(), 20, new Number(12)); \n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("sizes"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* f =
@@ -286,8 +282,7 @@ TEST(HeapSnapshotEntryChildren) {
CompileRun(
"function A() { }\n"
"a = new A;");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("children"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
for (int i = 0, count = global->GetChildrenCount(); i < count; ++i) {
@@ -314,8 +309,7 @@ TEST(HeapSnapshotCodeObjects) {
"function compiled(x) { return x + 1; }\n"
"var anonymous = (function() { return function() { return 0; } })();\n"
"compiled(1)");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("code"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
@@ -387,8 +381,7 @@ TEST(HeapSnapshotHeapNumbers) {
CompileRun(
"a = 1; // a is Smi\n"
"b = 2.5; // b is HeapNumber");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("numbers"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK(!GetProperty(global, v8::HeapGraphEdge::kProperty, "a"));
@@ -413,8 +406,7 @@ TEST(HeapSnapshotSlicedString) {
"123456789.123456789.123456789.123456789.123456789."
"123456789.123456789.123456789.123456789.123456789.\";"
"child_string = parent_string.slice(100);");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("strings"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* parent_string =
@@ -451,8 +443,7 @@ TEST(HeapSnapshotConsString) {
global->SetInternalField(0, v8::ToApiHandle<v8::String>(cons_string));
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("cons_strings"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
@@ -479,8 +470,7 @@ TEST(HeapSnapshotSymbol) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("a = Symbol('mySymbol');\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("Symbol"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* a =
@@ -504,8 +494,7 @@ TEST(HeapSnapshotWeakCollection) {
"k = {}; v = {}; s = 'str';\n"
"ws = new WeakSet(); ws.add(k); ws.add(v); ws[s] = s;\n"
"wm = new WeakMap(); wm.set(k, v); wm[s] = s;\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("WeakCollections"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* k =
@@ -578,8 +567,7 @@ TEST(HeapSnapshotCollection) {
"k = {}; v = {}; s = 'str';\n"
"set = new Set(); set.add(k); set.add(v); set[s] = s;\n"
"map = new Map(); map.set(k, v); map[s] = s;\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("Collections"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* k =
@@ -656,8 +644,7 @@ TEST(HeapSnapshotInternalReferences) {
global->SetInternalField(0, v8_num(17));
global->SetInternalField(1, obj);
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("internals"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
  // The first reference will not be present, because it's a Smi.
@@ -677,8 +664,7 @@ TEST(HeapSnapshotAddressReuse) {
"var a = [];\n"
"for (var i = 0; i < 10000; ++i)\n"
" a[i] = new A();\n");
- const v8::HeapSnapshot* snapshot1 =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot1"));
+ const v8::HeapSnapshot* snapshot1 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot1));
v8::SnapshotObjectId maxId1 = snapshot1->GetMaxSnapshotJSObjectId();
@@ -687,8 +673,7 @@ TEST(HeapSnapshotAddressReuse) {
" a[i] = new A();\n");
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- const v8::HeapSnapshot* snapshot2 =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot2"));
+ const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
const v8::HeapGraphNode* global2 = GetGlobalObject(snapshot2);
@@ -721,8 +706,7 @@ TEST(HeapEntryIdsAndArrayShift) {
"var a = new Array();\n"
"for (var i = 0; i < 10; ++i)\n"
" a.push(new AnObject());\n");
- const v8::HeapSnapshot* snapshot1 =
- heap_profiler->TakeHeapSnapshot(v8_str("s1"));
+ const v8::HeapSnapshot* snapshot1 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot1));
CompileRun(
@@ -731,8 +715,7 @@ TEST(HeapEntryIdsAndArrayShift) {
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- const v8::HeapSnapshot* snapshot2 =
- heap_profiler->TakeHeapSnapshot(v8_str("s2"));
+ const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
const v8::HeapGraphNode* global1 = GetGlobalObject(snapshot1);
@@ -768,16 +751,12 @@ TEST(HeapEntryIdsAndGC) {
"function B(x) { this.x = x; }\n"
"var a = new A();\n"
"var b = new B(a);");
- v8::Local<v8::String> s1_str = v8_str("s1");
- v8::Local<v8::String> s2_str = v8_str("s2");
- const v8::HeapSnapshot* snapshot1 =
- heap_profiler->TakeHeapSnapshot(s1_str);
+ const v8::HeapSnapshot* snapshot1 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot1));
CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
- const v8::HeapSnapshot* snapshot2 =
- heap_profiler->TakeHeapSnapshot(s2_str);
+ const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
CHECK_GT(snapshot1->GetMaxSnapshotJSObjectId(), 7000u);
@@ -827,8 +806,7 @@ TEST(HeapSnapshotRootPreservedAfterSorting) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("s"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* root1 = snapshot->GetRoot();
const_cast<i::HeapSnapshot*>(reinterpret_cast<const i::HeapSnapshot*>(
@@ -896,8 +874,7 @@ TEST(HeapSnapshotJSONSerialization) {
"function B(x) { this.x = x; }\n"
"var a = new A(" STRING_LITERAL_FOR_TEST ");\n"
"var b = new B(a);");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("json"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
TestJSONStream stream;
@@ -996,8 +973,7 @@ TEST(HeapSnapshotJSONSerializationAborting) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("abort"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
TestJSONStream stream(5);
snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
@@ -1067,9 +1043,12 @@ static TestStatsStream GetHeapStatsUpdate(
v8::HeapProfiler* heap_profiler,
v8::SnapshotObjectId* object_id = NULL) {
TestStatsStream stream;
- v8::SnapshotObjectId last_seen_id = heap_profiler->GetHeapStats(&stream);
+ int64_t timestamp = -1;
+ v8::SnapshotObjectId last_seen_id =
+ heap_profiler->GetHeapStats(&stream, &timestamp);
if (object_id)
*object_id = last_seen_id;
+ CHECK_NE(-1, timestamp);
CHECK_EQ(1, stream.eos_signaled());
return stream;
}
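
From here on, most of the test-heap-profiler.cc churn is mechanical: v8::HeapProfiler::TakeHeapSnapshot no longer takes a title string (an optional ActivityControl may still be passed), and GetHeapStats gained a timestamp out-parameter, as the hunk above shows. The before/after in brief (a sketch; `stream` is an output stream such as the TestStatsStream above):

    // Before: const v8::HeapSnapshot* s =
    //             heap_profiler->TakeHeapSnapshot(v8_str("name"));
    const v8::HeapSnapshot* s = heap_profiler->TakeHeapSnapshot();

    // GetHeapStats now also reports when the stats were captured.
    int64_t timestamp = -1;
    v8::SnapshotObjectId last_seen_id =
        heap_profiler->GetHeapStats(&stream, &timestamp);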
@@ -1279,8 +1258,7 @@ TEST(HeapSnapshotGetNodeById) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("id"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* root = snapshot->GetRoot();
CheckChildrenIds(snapshot, root, 0, 3);
@@ -1294,8 +1272,7 @@ TEST(HeapSnapshotGetSnapshotObjectId) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("globalObject = {};\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("get_snapshot_object_id"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* global_object =
@@ -1318,8 +1295,7 @@ TEST(HeapSnapshotUnknownSnapshotObjectId) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("globalObject = {};\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("unknown_object_id"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* node =
snapshot->GetNodeById(v8::HeapProfiler::kUnknownObjectId);
@@ -1357,16 +1333,13 @@ TEST(TakeHeapSnapshotAborting) {
const int snapshots_count = heap_profiler->GetSnapshotCount();
TestActivityControl aborting_control(1);
const v8::HeapSnapshot* no_snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("abort"),
- &aborting_control);
+ heap_profiler->TakeHeapSnapshot(&aborting_control);
CHECK(!no_snapshot);
CHECK_EQ(snapshots_count, heap_profiler->GetSnapshotCount());
CHECK_GT(aborting_control.total(), aborting_control.done());
TestActivityControl control(-1); // Don't abort.
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("full"),
- &control);
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot(&control);
CHECK(ValidateSnapshot(snapshot));
CHECK(snapshot);
@@ -1477,8 +1450,7 @@ TEST(HeapSnapshotRetainedObjectInfo) {
v8::Persistent<v8::String> p_CCC(isolate, v8_str("CCC"));
p_CCC.SetWrapperClassId(2);
CHECK_EQ(0, TestRetainedObjectInfo::instances.length());
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("retained"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
CHECK_EQ(3, TestRetainedObjectInfo::instances.length());
@@ -1570,8 +1542,7 @@ TEST(HeapSnapshotImplicitReferences) {
GraphWithImplicitRefs graph(&env);
v8::V8::AddGCPrologueCallback(&GraphWithImplicitRefs::gcPrologue);
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("implicit_refs"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global_object = GetGlobalObject(snapshot);
@@ -1604,28 +1575,25 @@ TEST(DeleteAllHeapSnapshots) {
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
heap_profiler->DeleteAllHeapSnapshots();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK(heap_profiler->TakeHeapSnapshot(v8_str("1")));
+ CHECK(heap_profiler->TakeHeapSnapshot());
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
heap_profiler->DeleteAllHeapSnapshots();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK(heap_profiler->TakeHeapSnapshot(v8_str("1")));
- CHECK(heap_profiler->TakeHeapSnapshot(v8_str("2")));
+ CHECK(heap_profiler->TakeHeapSnapshot());
+ CHECK(heap_profiler->TakeHeapSnapshot());
CHECK_EQ(2, heap_profiler->GetSnapshotCount());
heap_profiler->DeleteAllHeapSnapshots();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
}
-static const v8::HeapSnapshot* FindHeapSnapshot(v8::HeapProfiler* profiler,
- unsigned uid) {
+static bool FindHeapSnapshot(v8::HeapProfiler* profiler,
+ const v8::HeapSnapshot* snapshot) {
int length = profiler->GetSnapshotCount();
for (int i = 0; i < length; i++) {
- const v8::HeapSnapshot* snapshot = profiler->GetHeapSnapshot(i);
- if (snapshot->GetUid() == uid) {
- return snapshot;
- }
+ if (snapshot == profiler->GetHeapSnapshot(i)) return true;
}
- return NULL;
+ return false;
}
@@ -1635,38 +1603,31 @@ TEST(DeleteHeapSnapshot) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- const v8::HeapSnapshot* s1 =
- heap_profiler->TakeHeapSnapshot(v8_str("1"));
+ const v8::HeapSnapshot* s1 = heap_profiler->TakeHeapSnapshot();
CHECK(s1);
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
- unsigned uid1 = s1->GetUid();
- CHECK_EQ(s1, FindHeapSnapshot(heap_profiler, uid1));
+ CHECK(FindHeapSnapshot(heap_profiler, s1));
const_cast<v8::HeapSnapshot*>(s1)->Delete();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK(!FindHeapSnapshot(heap_profiler, uid1));
+ CHECK(!FindHeapSnapshot(heap_profiler, s1));
- const v8::HeapSnapshot* s2 =
- heap_profiler->TakeHeapSnapshot(v8_str("2"));
+ const v8::HeapSnapshot* s2 = heap_profiler->TakeHeapSnapshot();
CHECK(s2);
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
- unsigned uid2 = s2->GetUid();
- CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid2));
- CHECK_EQ(s2, FindHeapSnapshot(heap_profiler, uid2));
- const v8::HeapSnapshot* s3 =
- heap_profiler->TakeHeapSnapshot(v8_str("3"));
+ CHECK(FindHeapSnapshot(heap_profiler, s2));
+ const v8::HeapSnapshot* s3 = heap_profiler->TakeHeapSnapshot();
CHECK(s3);
CHECK_EQ(2, heap_profiler->GetSnapshotCount());
- unsigned uid3 = s3->GetUid();
- CHECK_NE(static_cast<int>(uid1), static_cast<int>(uid3));
- CHECK_EQ(s3, FindHeapSnapshot(heap_profiler, uid3));
+ CHECK_NE(s2, s3);
+ CHECK(FindHeapSnapshot(heap_profiler, s3));
const_cast<v8::HeapSnapshot*>(s2)->Delete();
CHECK_EQ(1, heap_profiler->GetSnapshotCount());
- CHECK(!FindHeapSnapshot(heap_profiler, uid2));
- CHECK_EQ(s3, FindHeapSnapshot(heap_profiler, uid3));
+ CHECK(!FindHeapSnapshot(heap_profiler, s2));
+ CHECK(FindHeapSnapshot(heap_profiler, s3));
const_cast<v8::HeapSnapshot*>(s3)->Delete();
CHECK_EQ(0, heap_profiler->GetSnapshotCount());
- CHECK(!FindHeapSnapshot(heap_profiler, uid3));
+ CHECK(!FindHeapSnapshot(heap_profiler, s3));
}
@@ -1687,9 +1648,7 @@ TEST(GlobalObjectName) {
NameResolver name_resolver;
const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("document"),
- NULL,
- &name_resolver);
+ heap_profiler->TakeHeapSnapshot(NULL, &name_resolver);
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK(global);
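
TakeHeapSnapshot() keeps only the optional ActivityControl and ObjectNameResolver parameters used here. A resolver is a small subclass; a sketch (the class name and returned label are hypothetical):

    class DocumentNameResolver : public v8::HeapProfiler::ObjectNameResolver {
     public:
      // Invoked for each global object encountered while building the snapshot.
      const char* GetName(v8::Handle<v8::Object> object) override {
        return "document";  // hypothetical label
      }
    };
    DocumentNameResolver resolver;
    const v8::HeapSnapshot* s = heap_profiler->TakeHeapSnapshot(NULL, &resolver);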
@@ -1705,8 +1664,7 @@ TEST(GlobalObjectFields) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("obj = {};");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* builtins =
@@ -1728,10 +1686,9 @@ TEST(NoHandleLeaks) {
CompileRun("document = { URL:\"abcdefgh\" };");
- v8::Handle<v8::String> name(v8_str("leakz"));
i::Isolate* isolate = CcTest::i_isolate();
int count_before = i::HandleScope::NumberOfHandles(isolate);
- heap_profiler->TakeHeapSnapshot(name);
+ heap_profiler->TakeHeapSnapshot();
int count_after = i::HandleScope::NumberOfHandles(isolate);
CHECK_EQ(count_before, count_after);
}
@@ -1741,8 +1698,7 @@ TEST(NodesIteration) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("iteration"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK(global);
@@ -1763,8 +1719,7 @@ TEST(GetHeapValueForNode) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("a = { s_prop: \'value\', n_prop: \'value2\' };");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("value"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK(heap_profiler->FindObjectById(global->GetId())->IsObject());
@@ -1798,8 +1753,7 @@ TEST(GetHeapValueForDeletedObject) {
// property of the "a" object. Also, the "p" object can't be an empty one
// because the empty object is static and isn't actually deleted.
CompileRun("a = { p: { r: {} } };");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* obj = GetProperty(
@@ -1889,8 +1843,7 @@ TEST(FastCaseAccessors) {
"obj1.__defineSetter__('propWithSetter', function Z(value) {\n"
" return this.value_ = value;\n"
"});\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("fastCaseAccessors"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
@@ -1935,8 +1888,7 @@ TEST(FastCaseRedefinedAccessors) {
v8::Utils::OpenHandle(*js_global->Get(v8_str("obj1")).As<v8::Object>());
USE(js_obj1);
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("fastCaseAccessors"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK(global);
@@ -1964,8 +1916,7 @@ TEST(SlowCaseAccessors) {
"obj1.__defineSetter__('propWithSetter', function Z(value) {\n"
" return this.value_ = value;\n"
"});\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("slowCaseAccessors"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
@@ -1994,8 +1945,7 @@ TEST(HiddenPropertiesFastCase) {
CompileRun(
"function C(x) { this.a = this; this.b = x; }\n"
"c = new C(2012);\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("HiddenPropertiesFastCase1"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* c =
@@ -2010,8 +1960,7 @@ TEST(HiddenPropertiesFastCase) {
CHECK(!cHandle.IsEmpty() && cHandle->IsObject());
cHandle->ToObject(isolate)->SetHiddenValue(v8_str("key"), v8_str("val"));
- snapshot = heap_profiler->TakeHeapSnapshot(
- v8_str("HiddenPropertiesFastCase2"));
+ snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
global = GetGlobalObject(snapshot);
c = GetProperty(global, v8::HeapGraphEdge::kProperty, "c");
@@ -2028,8 +1977,7 @@ TEST(AccessorInfo) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("function foo(x) { }\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("AccessorInfoTest"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* foo =
@@ -2074,8 +2022,7 @@ bool HasWeakEdge(const v8::HeapGraphNode* node) {
bool HasWeakGlobalHandle() {
v8::Isolate* isolate = CcTest::isolate();
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("weaks"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* gc_roots = GetNode(
snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "(GC roots)");
@@ -2115,8 +2062,7 @@ TEST(SfiAndJsFunctionWeakRefs) {
CompileRun(
"fun = (function (x) { return function () { return x + 1; } })(1);");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("fun"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
CHECK(global);
@@ -2136,8 +2082,7 @@ TEST(NoDebugObjectInSnapshot) {
CHECK(CcTest::i_isolate()->debug()->Load());
CompileRun("foo = {};");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* root = snapshot->GetRoot();
int globals_count = 0;
@@ -2161,8 +2106,7 @@ TEST(AllStrongGcRootsHaveNames) {
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("foo = {};");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* gc_roots = GetNode(
snapshot->GetRoot(), v8::HeapGraphNode::kSynthetic, "(GC roots)");
@@ -2184,8 +2128,7 @@ TEST(NoRefsToNonEssentialEntries) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("global_object = {};\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* global_object =
@@ -2205,8 +2148,7 @@ TEST(MapHasDescriptorsAndTransitions) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("obj = { a: 10 };\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* global_object =
@@ -2244,8 +2186,7 @@ TEST(ManyLocalsInSharedContext) {
"result.push('return f_' + (n - 1) + ';');"
"result.push('})()');"
"var ok = eval(result.join('\\n'));");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
@@ -2279,8 +2220,7 @@ TEST(AllocationSitesAreVisible) {
CompileRun(
"fun = function () { var a = [3, 2, 1]; return a; }\n"
"fun();");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
@@ -2292,11 +2232,11 @@ TEST(AllocationSitesAreVisible) {
GetProperty(fun_code, v8::HeapGraphEdge::kInternal, "literals");
CHECK(literals);
CHECK_EQ(v8::HeapGraphNode::kArray, literals->GetType());
- CHECK_EQ(2, literals->GetChildrenCount());
+ CHECK_EQ(1, literals->GetChildrenCount());
- // The second value in the literals array should be the boilerplate,
+ // The first value in the literals array should be the boilerplate,
// after an AllocationSite.
- const v8::HeapGraphEdge* prop = literals->GetChild(1);
+ const v8::HeapGraphEdge* prop = literals->GetChild(0);
const v8::HeapGraphNode* allocation_site = prop->GetToNode();
v8::String::Utf8Value name(allocation_site->GetName());
CHECK_EQ(0, strcmp("system / AllocationSite", *name));
@@ -2333,8 +2273,7 @@ TEST(JSFunctionHasCodeLink) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("function foo(x, y) { return x + y; }\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* foo_func =
@@ -2375,8 +2314,7 @@ TEST(CheckCodeNames) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("var a = 1.1;");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("CheckCodeNames"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const char* stub_path[] = {
@@ -2634,8 +2572,7 @@ TEST(ArrayBufferAndArrayBufferView) {
v8::HandleScope scope(env->GetIsolate());
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun("arr1 = new Uint32Array(100);\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* arr1_obj =
@@ -2693,8 +2630,7 @@ TEST(ArrayBufferSharedBackingStore) {
v8::Handle<v8::Value> result = CompileRun("ab2.byteLength");
CHECK_EQ(1024, result->Int32Value());
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* ab1_node =
@@ -2728,8 +2664,7 @@ TEST(BoxObject) {
global->Set(0, v8::ToApiHandle<v8::Object>(box));
v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global_node = GetGlobalObject(snapshot);
const v8::HeapGraphNode* box_node =
@@ -2756,8 +2691,7 @@ TEST(WeakContainers) {
"foo(obj);\n"
"%OptimizeFunctionOnNextCall(foo);\n"
"foo(obj);\n");
- const v8::HeapSnapshot* snapshot =
- heap_profiler->TakeHeapSnapshot(v8_str("snapshot"));
+ const v8::HeapSnapshot* snapshot = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot));
const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
const v8::HeapGraphNode* obj =
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index ae3c1d365c..9867f933b1 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -36,10 +36,10 @@
#include "src/global-handles.h"
#include "src/ic/ic.h"
#include "src/macro-assembler.h"
-#include "src/snapshot.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
+using v8::Just;
static void CheckMap(Map* map, int type, int instance_size) {
CHECK(map->IsHeapObject());
@@ -191,9 +191,7 @@ TEST(HeapObjects) {
Handle<String> object_string = Handle<String>::cast(factory->Object_string());
Handle<GlobalObject> global(CcTest::i_isolate()->context()->global_object());
- v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, object_string);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(global, object_string));
// Check ToString for oddballs
CheckOddball(isolate, heap->true_value(), "true");
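
The v8::Maybe rewrite seen here repeats across the test files below: Maybe's has_value/value fields are no longer public, and results are compared against Just()/Nothing() instead. Both styles side by side:

    v8::Maybe<bool> has = JSReceiver::HasOwnProperty(global, name);
    // Before: the fields were read directly.
    //   CHECK(has.has_value); CHECK(has.value);
    // Now: Maybe<T> supports equality against Just(v) / Nothing<T>().
    CHECK(v8::Just(true) == has);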
@@ -260,9 +258,7 @@ TEST(GarbageCollection) {
heap->CollectGarbage(NEW_SPACE);
// Function should be alive.
- v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, name);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(global, name));
// Check function is retained.
Handle<Object> func_value =
Object::GetProperty(global, name).ToHandleChecked();
@@ -280,9 +276,7 @@ TEST(GarbageCollection) {
// After gc, it should survive.
heap->CollectGarbage(NEW_SPACE);
- maybe = JSReceiver::HasOwnProperty(global, obj_name);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name));
Handle<Object> obj =
Object::GetProperty(global, obj_name).ToHandleChecked();
CHECK(obj->IsJSObject());
@@ -639,85 +633,55 @@ TEST(ObjectProperties) {
Handle<Smi> two(Smi::FromInt(2), isolate);
// check for empty
- v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, first);
- CHECK(maybe.has_value);
- CHECK(!maybe.value);
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first
JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
- maybe = JSReceiver::HasOwnProperty(obj, first);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
// delete first
JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
- maybe = JSReceiver::HasOwnProperty(obj, first);
- CHECK(maybe.has_value);
- CHECK(!maybe.value);
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
// add first and then second
JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
- maybe = JSReceiver::HasOwnProperty(obj, first);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
- maybe = JSReceiver::HasOwnProperty(obj, second);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
// delete first and then second
JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
- maybe = JSReceiver::HasOwnProperty(obj, second);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
JSReceiver::DeleteProperty(obj, second, SLOPPY).Check();
- maybe = JSReceiver::HasOwnProperty(obj, first);
- CHECK(maybe.has_value);
- CHECK(!maybe.value);
- maybe = JSReceiver::HasOwnProperty(obj, second);
- CHECK(maybe.has_value);
- CHECK(!maybe.value);
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
// add first and then second
JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
- maybe = JSReceiver::HasOwnProperty(obj, first);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
- maybe = JSReceiver::HasOwnProperty(obj, second);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, second));
// delete second and then first
JSReceiver::DeleteProperty(obj, second, SLOPPY).Check();
- maybe = JSReceiver::HasOwnProperty(obj, first);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, first));
JSReceiver::DeleteProperty(obj, first, SLOPPY).Check();
- maybe = JSReceiver::HasOwnProperty(obj, first);
- CHECK(maybe.has_value);
- CHECK(!maybe.value);
- maybe = JSReceiver::HasOwnProperty(obj, second);
- CHECK(maybe.has_value);
- CHECK(!maybe.value);
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, first));
+ CHECK(Just(false) == JSReceiver::HasOwnProperty(obj, second));
// check string and internalized string match
const char* string1 = "fisk";
Handle<String> s1 = factory->NewStringFromAsciiChecked(string1);
JSReceiver::SetProperty(obj, s1, one, SLOPPY).Check();
Handle<String> s1_string = factory->InternalizeUtf8String(string1);
- maybe = JSReceiver::HasOwnProperty(obj, s1_string);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s1_string));
// check internalized string and string match
const char* string2 = "fugl";
Handle<String> s2_string = factory->InternalizeUtf8String(string2);
JSReceiver::SetProperty(obj, s2_string, one, SLOPPY).Check();
Handle<String> s2 = factory->NewStringFromAsciiChecked(string2);
- maybe = JSReceiver::HasOwnProperty(obj, s2);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(obj, s2));
}
@@ -1502,6 +1466,7 @@ TEST(TestInternalWeakLists) {
// Some flags turn Scavenge collections into Mark-sweep collections
// and hence are incompatible with this test case.
if (FLAG_gc_global || FLAG_stress_compaction) return;
+ FLAG_retain_maps_for_n_gc = 0;
static const int kNumTestContexts = 10;
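
FLAG_retain_maps_for_n_gc is new in this V8 version: unreferenced maps are kept alive for a number of collections before being released. Tests that assert a map (or whatever it retains) dies in the very next GC therefore pin the flag to zero, as several hunks below do:

    i::FLAG_retain_maps_for_n_gc = 0;  // let unreferenced maps die immediately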
@@ -2176,6 +2141,12 @@ TEST(InstanceOfStubWriteBarrier) {
}
+static int NumberOfProtoTransitions(Map* map) {
+ return TransitionArray::NumberOfPrototypeTransitions(
+ TransitionArray::GetPrototypeTransitions(map));
+}
+
+
TEST(PrototypeTransitionClearing) {
if (FLAG_never_compact) return;
CcTest::InitializeVM();
@@ -2188,7 +2159,7 @@ TEST(PrototypeTransitionClearing) {
v8::Utils::OpenHandle(
*v8::Handle<v8::Object>::Cast(
CcTest::global()->Get(v8_str("base"))));
- int initialTransitions = baseObject->map()->NumberOfProtoTransitions();
+ int initialTransitions = NumberOfProtoTransitions(baseObject->map());
CompileRun(
"var live = [];"
@@ -2201,16 +2172,17 @@ TEST(PrototypeTransitionClearing) {
// Verify that only dead prototype transitions are cleared.
CHECK_EQ(initialTransitions + 10,
- baseObject->map()->NumberOfProtoTransitions());
+ NumberOfProtoTransitions(baseObject->map()));
CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
const int transitions = 10 - 3;
CHECK_EQ(initialTransitions + transitions,
- baseObject->map()->NumberOfProtoTransitions());
+ NumberOfProtoTransitions(baseObject->map()));
// Verify that prototype transitions array was compacted.
- FixedArray* trans = baseObject->map()->GetPrototypeTransitions();
+ FixedArray* trans =
+ TransitionArray::GetPrototypeTransitions(baseObject->map());
for (int i = initialTransitions; i < initialTransitions + transitions; i++) {
- int j = Map::kProtoTransitionHeaderSize + i;
+ int j = TransitionArray::kProtoTransitionHeaderSize + i;
CHECK(trans->get(j)->IsMap());
}
@@ -2228,7 +2200,7 @@ TEST(PrototypeTransitionClearing) {
i::FLAG_always_compact = true;
Handle<Map> map(baseObject->map());
CHECK(!space->LastPage()->Contains(
- map->GetPrototypeTransitions()->address()));
+ TransitionArray::GetPrototypeTransitions(*map)->address()));
CHECK(space->LastPage()->Contains(prototype->address()));
}
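
Map-level transition accessors (NumberOfProtoTransitions(), GetPrototypeTransitions(), transitions()) have become static functions on TransitionArray that take the map or its raw_transitions() field, which is what the helper above wraps. The call shapes used by this patch:

    // Prototype transitions, reached through the map:
    FixedArray* proto_trans = TransitionArray::GetPrototypeTransitions(map);
    int n_proto = TransitionArray::NumberOfPrototypeTransitions(proto_trans);
    // Ordinary transitions, reached through the raw encoded field:
    int n = TransitionArray::NumberOfTransitions(map->raw_transitions());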
@@ -2267,9 +2239,12 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
marking->Start();
// The following two calls will increment CcTest::heap()->global_ic_age().
- const int kLongIdlePauseInMs = 1000;
+ const double kLongIdlePauseInSeconds = 1.0;
CcTest::isolate()->ContextDisposedNotification();
- CcTest::isolate()->IdleNotification(kLongIdlePauseInMs);
+ CcTest::isolate()->IdleNotificationDeadline(
+ (v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
+ kLongIdlePauseInSeconds);
while (!marking->IsStopped() && !marking->IsComplete()) {
marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
@@ -2323,9 +2298,12 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
// The following two calls will increment CcTest::heap()->global_ic_age().
// Since incremental marking is off, IdleNotification will do full GC.
- const int kLongIdlePauseInMs = 1000;
+ const double kLongIdlePauseInSeconds = 1.0;
CcTest::isolate()->ContextDisposedNotification();
- CcTest::isolate()->IdleNotification(kLongIdlePauseInMs);
+ CcTest::isolate()->IdleNotificationDeadline(
+ (v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
+ kLongIdlePauseInSeconds);
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
@@ -2368,9 +2346,14 @@ TEST(IdleNotificationFinishMarking) {
CHECK(!marking->IsIdleMarkingDelayCounterLimitReached());
}
+ marking->SetWeakClosureWasOverApproximatedForTesting(true);
+
// The next idle notification has to finish incremental marking.
- const int kLongIdleTime = 1000000;
- CcTest::isolate()->IdleNotification(kLongIdleTime);
+ const double kLongIdleTime = 1000.0;
+ CcTest::isolate()->IdleNotificationDeadline(
+ (v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
+ static_cast<double>(v8::base::Time::kMicrosecondsPerSecond)) +
+ kLongIdleTime);
CHECK_EQ(CcTest::heap()->gc_count(), 1);
}
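
IdleNotification(idle_time_in_ms) is gone; IdleNotificationDeadline() takes an absolute point in seconds on the monotonic clock rather than a duration, hence the conversion boilerplate repeated in these tests. Factored out as a sketch:

    // Current monotonic time in seconds, the clock the deadline is measured on.
    double MonotonicNowInSeconds() {
      return v8::base::TimeTicks::HighResolutionNow().ToInternalValue() /
             static_cast<double>(v8::base::Time::kMicrosecondsPerSecond);
    }
    // Grant roughly one second of idle time:
    isolate->IdleNotificationDeadline(MonotonicNowInSeconds() + 1.0);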
@@ -2550,8 +2533,8 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
} else {
CHECK_EQ(2.2, inner_object->RawFastDoublePropertyAt(idx1));
}
- CHECK(CcTest::heap()->InOldPointerSpace(
- inner_object->RawFastPropertyAt(idx2)));
+ CHECK(
+ CcTest::heap()->InOldPointerSpace(inner_object->RawFastPropertyAt(idx2)));
}
@@ -2911,7 +2894,7 @@ TEST(OptimizedAllocationArrayLiterals) {
static int CountMapTransitions(Map* map) {
- return map->transitions()->number_of_transitions();
+ return TransitionArray::NumberOfTransitions(map->raw_transitions());
}
@@ -2921,6 +2904,7 @@ TEST(Regress1465) {
i::FLAG_stress_compaction = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_trace_incremental_marking = true;
+ i::FLAG_retain_maps_for_n_gc = 0;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
static const int transitions_count = 256;
@@ -2983,6 +2967,7 @@ static void AddPropertyTo(
Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
i::FLAG_gc_interval = gc_count;
i::FLAG_gc_global = true;
+ i::FLAG_retain_maps_for_n_gc = 0;
CcTest::heap()->set_allocation_timeout(gc_count);
JSReceiver::SetProperty(object, prop_name, twenty_three, SLOPPY).Check();
}
@@ -3090,7 +3075,7 @@ TEST(TransitionArraySimpleToFull) {
CompileRun("o = new F;"
"root = new F");
root = GetByName("root");
- DCHECK(root->map()->transitions()->IsSimpleTransition());
+ DCHECK(TransitionArray::IsSimpleTransition(root->map()->raw_transitions()));
AddPropertyTo(2, root, "happy");
// Count number of live transitions after marking. Note that one transition
@@ -4090,7 +4075,8 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
if (marking->IsStopped()) marking->Start();
// This big step should be sufficient to mark the whole array.
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- DCHECK(marking->IsComplete());
+ DCHECK(marking->IsComplete() ||
+ marking->IsReadyToOverApproximateWeakClosure());
}
@@ -4181,7 +4167,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// Now make sure that a gc should get rid of the function, even though we
// still have the allocation site alive.
for (int i = 0; i < 4; i++) {
- heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
}
// The site still exists because of our global handle, but the code is no
@@ -4283,6 +4269,7 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
i::FLAG_weak_embedded_objects_in_optimized_code = true;
i::FLAG_allow_natives_syntax = true;
i::FLAG_compilation_cache = false;
+ i::FLAG_retain_maps_for_n_gc = 0;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
@@ -4608,17 +4595,33 @@ Handle<JSFunction> GetFunctionByName(Isolate* isolate, const char* name) {
}
-void CheckIC(Code* code, Code::Kind kind, InlineCacheState state) {
- Code* ic = FindFirstIC(code, kind);
- CHECK(ic->is_inline_cache_stub());
- CHECK(ic->ic_state() == state);
+void CheckIC(Code* code, Code::Kind kind, SharedFunctionInfo* shared,
+ int ic_slot, InlineCacheState state) {
+ if (FLAG_vector_ics &&
+ (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC ||
+ kind == Code::CALL_IC)) {
+ TypeFeedbackVector* vector = shared->feedback_vector();
+ FeedbackVectorICSlot slot(ic_slot);
+ if (kind == Code::LOAD_IC) {
+ LoadICNexus nexus(vector, slot);
+ CHECK_EQ(nexus.StateFromFeedback(), state);
+ } else if (kind == Code::KEYED_LOAD_IC) {
+ KeyedLoadICNexus nexus(vector, slot);
+ CHECK_EQ(nexus.StateFromFeedback(), state);
+ } else if (kind == Code::CALL_IC) {
+ CallICNexus nexus(vector, slot);
+ CHECK_EQ(nexus.StateFromFeedback(), state);
+ }
+ } else {
+ Code* ic = FindFirstIC(code, kind);
+ CHECK(ic->is_inline_cache_stub());
+ CHECK(ic->ic_state() == state);
+ }
}
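
With --vector-ics, the state of load, keyed-load, and call ICs lives in the function's TypeFeedbackVector rather than in patched code objects, so the new CheckIC reads it through a FeedbackNexus. The lookup in isolation (shared is the function's SharedFunctionInfo*; slot 0 assumed):

    TypeFeedbackVector* vector = shared->feedback_vector();
    LoadICNexus nexus(vector, FeedbackVectorICSlot(0));
    CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());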
TEST(MonomorphicStaysMonomorphicAfterGC) {
if (FLAG_always_opt) return;
- // TODO(mvstanton): vector ics need weak support!
- if (FLAG_vector_ics) return;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
@@ -4641,19 +4644,17 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
CompileRun("(testIC())");
}
heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CheckIC(loadIC->code(), Code::LOAD_IC, MONOMORPHIC);
+ CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, MONOMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CheckIC(loadIC->code(), Code::LOAD_IC, MONOMORPHIC);
+ CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, MONOMORPHIC);
}
TEST(PolymorphicStaysPolymorphicAfterGC) {
if (FLAG_always_opt) return;
- // TODO(mvstanton): vector ics need weak support!
- if (FLAG_vector_ics) return;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
@@ -4679,12 +4680,12 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
CompileRun("(testIC())");
}
heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CheckIC(loadIC->code(), Code::LOAD_IC, POLYMORPHIC);
+ CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, POLYMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CheckIC(loadIC->code(), Code::LOAD_IC, POLYMORPHIC);
+ CheckIC(loadIC->code(), Code::LOAD_IC, loadIC->shared(), 0, POLYMORPHIC);
}
@@ -4878,7 +4879,7 @@ TEST(ArrayShiftSweeping) {
UNINITIALIZED_TEST(PromotionQueue) {
i::FLAG_expose_gc = true;
- i::FLAG_max_semi_space_size = 2;
+ i::FLAG_max_semi_space_size = 2 * (Page::kPageSize / MB);
v8::Isolate* isolate = v8::Isolate::New();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
@@ -5073,16 +5074,109 @@ TEST(Regress442710) {
TEST(NumberStringCacheSize) {
- if (!Snapshot::HaveASnapshotToStartFrom()) return;
// Test that the number-string cache has not been resized in the snapshot.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
+ if (!isolate->snapshot_available()) return;
Heap* heap = isolate->heap();
CHECK_EQ(TestHeap::kInitialNumberStringCacheSize * 2,
heap->number_string_cache()->length());
}
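
The same relocation explains the include churn in the files below: the serializer moved under src/snapshot/, so the src/snapshot.h and src/serialize.h includes disappear, src/natives.h becomes src/snapshot/natives.h, and snapshot presence is queried per isolate instead of through the static Snapshot class:

    // Old: if (!Snapshot::HaveASnapshotToStartFrom()) return;
    if (!isolate->snapshot_available()) return;  // per-isolate query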
+TEST(Regress3877) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
+ CompileRun("function cls() { this.x = 10; }");
+ Handle<WeakCell> weak_prototype;
+ {
+ HandleScope inner_scope(isolate);
+ v8::Local<v8::Value> result = CompileRun("cls.prototype");
+ Handle<JSObject> proto =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(result));
+ weak_prototype = inner_scope.CloseAndEscape(factory->NewWeakCell(proto));
+ }
+ CHECK(!weak_prototype->cleared());
+ CompileRun(
+ "var a = { };"
+ "a.x = new cls();"
+ "cls.prototype = null;");
+ for (int i = 0; i < 4; i++) {
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ }
+ // The map of a.x keeps the prototype alive.
+ CHECK(!weak_prototype->cleared());
+ // Change the map of a.x and make the previous map garbage collectable.
+ CompileRun("a.x.__proto__ = {};");
+ for (int i = 0; i < 4; i++) {
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ }
+ CHECK(weak_prototype->cleared());
+}
+
+
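
Regress3877 above and AddRetainedMap below lean on the WeakCell machinery introduced for map and prototype weakness: a cell points at its value without keeping it alive, and cleared() flips once the value is collected. The observation pattern, reduced (object and the surrounding GC driving are assumed):

    Handle<WeakCell> cell = factory->NewWeakCell(object);
    CHECK(!cell->cleared());  // value still strongly reachable elsewhere
    // ...drop every strong reference to the value, run full GCs...
    CHECK(cell->cleared());   // value was collected; the cell observed it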
+Handle<WeakCell> AddRetainedMap(Isolate* isolate, Heap* heap) {
+ HandleScope inner_scope(isolate);
+ Handle<Map> map = Map::Create(isolate, 1);
+ v8::Local<v8::Value> result =
+ CompileRun("(function () { return {x : 10}; })();");
+ Handle<JSObject> proto =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(result));
+ map->set_prototype(*proto);
+ heap->AddRetainedMap(map);
+ return inner_scope.CloseAndEscape(Map::WeakCellForMap(map));
+}
+
+
+void CheckMapRetainingFor(int n) {
+ FLAG_retain_maps_for_n_gc = n;
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ Handle<WeakCell> weak_cell = AddRetainedMap(isolate, heap);
+ CHECK(!weak_cell->cleared());
+ for (int i = 0; i < n; i++) {
+ heap->CollectGarbage(OLD_POINTER_SPACE);
+ }
+ CHECK(!weak_cell->cleared());
+ heap->CollectGarbage(OLD_POINTER_SPACE);
+ CHECK(weak_cell->cleared());
+}
+
+
+TEST(MapRetaining) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ CheckMapRetainingFor(FLAG_retain_maps_for_n_gc);
+ CheckMapRetainingFor(0);
+ CheckMapRetainingFor(1);
+ CheckMapRetainingFor(7);
+}
+
+
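
CheckMapRetainingFor pins down the contract of the retention flag: a retained map survives exactly n old-space collections after being added and is cleared by the following one. Spelled out for n = 2, condensing the loop above:

    FLAG_retain_maps_for_n_gc = 2;
    Handle<WeakCell> cell = AddRetainedMap(isolate, heap);
    heap->CollectGarbage(OLD_POINTER_SPACE);  // GC 1: still retained
    heap->CollectGarbage(OLD_POINTER_SPACE);  // GC 2: still retained
    heap->CollectGarbage(OLD_POINTER_SPACE);  // GC 3: retention expired
    CHECK(cell->cleared());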
+TEST(RegressArrayListGC) {
+ FLAG_retain_maps_for_n_gc = 1;
+ FLAG_incremental_marking = 0;
+ FLAG_gc_global = true;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Heap* heap = isolate->heap();
+ AddRetainedMap(isolate, heap);
+ Handle<Map> map = Map::Create(isolate, 1);
+ heap->CollectGarbage(OLD_POINTER_SPACE);
+ // Force GC in old space on next addition of retained map.
+ Map::WeakCellForMap(map);
+ SimulateFullSpace(CcTest::heap()->new_space());
+ for (int i = 0; i < 10; i++) {
+ heap->AddRetainedMap(map);
+ }
+ heap->CollectGarbage(OLD_POINTER_SPACE);
+}
+
+
#ifdef DEBUG
TEST(PathTracer) {
CcTest::InitializeVM();
@@ -5095,23 +5189,63 @@ TEST(PathTracer) {
#endif // DEBUG
-TEST(FirstPageFitsStartup) {
- // Test that the first page sizes provided by the default snapshot are large
- // enough to fit everything right after startup and creating one context.
- // If this test fails, we are allocating too much aside from deserialization.
- if (!Snapshot::HaveASnapshotToStartFrom()) return;
- if (Snapshot::EmbedsScript()) return;
- CcTest::InitializeVM();
- LocalContext env;
- PagedSpaces spaces(CcTest::heap());
- for (PagedSpace* s = spaces.next(); s != NULL; s = spaces.next()) {
- uint32_t default_size = s->AreaSize();
- uint32_t reduced_size = Snapshot::SizeOfFirstPage(s->identity());
- if (reduced_size == default_size) continue;
- int counter = 0;
- Page* page = NULL;
- for (PageIterator it(s); it.has_next(); page = it.next()) counter++;
- CHECK_LE(counter, 1);
- CHECK(static_cast<uint32_t>(page->area_size()) == reduced_size);
+TEST(WritableVsImmortalRoots) {
+ for (int i = 0; i < Heap::kStrongRootListLength; ++i) {
+ Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
+ bool writable = Heap::RootCanBeWrittenAfterInitialization(root_index);
+ bool immortal = Heap::RootIsImmortalImmovable(root_index);
+ // A root value can be writable, immortal, or neither, but not both.
+ CHECK(!immortal || !writable);
}
}
+
+
+static void TestRightTrimFixedTypedArray(v8::ExternalArrayType type,
+ int initial_length,
+ int elements_to_trim) {
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+
+ Handle<FixedTypedArrayBase> array =
+ factory->NewFixedTypedArray(initial_length, type);
+ int old_size = array->size();
+ heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(*array, elements_to_trim);
+
+ // Check that free space filler is at the right place and did not smash the
+ // array header.
+ CHECK(array->IsFixedArrayBase());
+ CHECK_EQ(initial_length - elements_to_trim, array->length());
+ int new_size = array->size();
+ if (new_size != old_size) {
+ // Free space filler should be created in this case.
+ Address next_obj_address = array->address() + array->size();
+ CHECK(HeapObject::FromAddress(next_obj_address)->IsFiller());
+ }
+ heap->CollectAllAvailableGarbage();
+}
+
+
+TEST(Regress472513) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+
+ // The combination of type/initial_length/elements_to_trim triggered
+ // typed array header smashing with free space filler (crbug/472513).
+
+ // 64-bit cases.
+ TestRightTrimFixedTypedArray(v8::kExternalUint8Array, 32, 6);
+ TestRightTrimFixedTypedArray(v8::kExternalUint8Array, 32 - 7, 6);
+ TestRightTrimFixedTypedArray(v8::kExternalUint16Array, 16, 6);
+ TestRightTrimFixedTypedArray(v8::kExternalUint16Array, 16 - 3, 6);
+ TestRightTrimFixedTypedArray(v8::kExternalUint32Array, 8, 6);
+ TestRightTrimFixedTypedArray(v8::kExternalUint32Array, 8 - 1, 6);
+
+ // 32-bit cases.
+ TestRightTrimFixedTypedArray(v8::kExternalUint8Array, 16, 3);
+ TestRightTrimFixedTypedArray(v8::kExternalUint8Array, 16 - 3, 3);
+ TestRightTrimFixedTypedArray(v8::kExternalUint16Array, 8, 3);
+ TestRightTrimFixedTypedArray(v8::kExternalUint16Array, 8 - 1, 3);
+ TestRightTrimFixedTypedArray(v8::kExternalUint32Array, 4, 3);
+}
diff --git a/deps/v8/test/cctest/test-javascript-arm64.cc b/deps/v8/test/cctest/test-javascript-arm64.cc
index 5e4503478d..cbbbf3c22a 100644
--- a/deps/v8/test/cctest/test-javascript-arm64.cc
+++ b/deps/v8/test/cctest/test-javascript-arm64.cc
@@ -35,7 +35,6 @@
#include "src/execution.h"
#include "src/isolate.h"
#include "src/parser.h"
-#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-js-arm64-variables.cc b/deps/v8/test/cctest/test-js-arm64-variables.cc
index 7f2771094c..98d3365b87 100644
--- a/deps/v8/test/cctest/test-js-arm64-variables.cc
+++ b/deps/v8/test/cctest/test-js-arm64-variables.cc
@@ -37,7 +37,6 @@
#include "src/execution.h"
#include "src/isolate.h"
#include "src/parser.h"
-#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index c909a02125..b571564601 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -36,7 +36,6 @@
#include "src/isolate.h"
#include "src/parser.h"
#include "src/smart-pointers.h"
-#include "src/snapshot.h"
#include "src/unicode-inl.h"
#include "src/utils.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 4b676d2e05..2386cec55b 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -39,7 +39,7 @@
#include "src/cpu-profiler.h"
#include "src/log.h"
#include "src/log-utils.h"
-#include "src/natives.h"
+#include "src/snapshot/natives.h"
#include "src/utils.h"
#include "src/v8threads.h"
#include "src/version.h"
diff --git a/deps/v8/test/cctest/test-macro-assembler-ia32.cc b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
index b2b8c946c3..3834b18798 100644
--- a/deps/v8/test/cctest/test-macro-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-ia32.cc
@@ -33,7 +33,6 @@
#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index 7f20a8dd4b..4ff8cba68b 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -32,7 +32,6 @@
#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
#include "test/cctest/cctest.h"
namespace i = v8::internal;
@@ -98,21 +97,13 @@ typedef int (*F0)();
static void EntryCode(MacroAssembler* masm) {
// Smi constant register is callee save.
- __ pushq(i::kSmiConstantRegister);
__ pushq(i::kRootRegister);
- __ InitializeSmiConstantRegister();
__ InitializeRootRegister();
}
static void ExitCode(MacroAssembler* masm) {
- // Return -1 if kSmiConstantRegister was clobbered during the test.
- __ Move(rdx, Smi::FromInt(1));
- __ cmpq(rdx, i::kSmiConstantRegister);
- __ movq(rdx, Immediate(-1));
- __ cmovq(not_equal, rax, rdx);
__ popq(i::kRootRegister);
- __ popq(i::kSmiConstantRegister);
}
@@ -556,32 +547,6 @@ TEST(SmiCheck) {
cond = masm->CheckNonNegativeSmi(rcx); // "Positive" non-smi.
__ j(cond, &exit);
- // CheckIsMinSmi
-
- __ incq(rax);
- __ movq(rcx, Immediate(Smi::kMaxValue));
- __ Integer32ToSmi(rcx, rcx);
- cond = masm->CheckIsMinSmi(rcx);
- __ j(cond, &exit);
-
- __ incq(rax);
- __ movq(rcx, Immediate(0));
- __ Integer32ToSmi(rcx, rcx);
- cond = masm->CheckIsMinSmi(rcx);
- __ j(cond, &exit);
-
- __ incq(rax);
- __ movq(rcx, Immediate(Smi::kMinValue));
- __ Integer32ToSmi(rcx, rcx);
- cond = masm->CheckIsMinSmi(rcx);
- __ j(NegateCondition(cond), &exit);
-
- __ incq(rax);
- __ movq(rcx, Immediate(Smi::kMinValue + 1));
- __ Integer32ToSmi(rcx, rcx);
- cond = masm->CheckIsMinSmi(rcx);
- __ j(cond, &exit);
-
// CheckBothSmi
__ incq(rax);
diff --git a/deps/v8/test/cctest/test-macro-assembler-x87.cc b/deps/v8/test/cctest/test-macro-assembler-x87.cc
index 0b057d818f..3cee27add0 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x87.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x87.cc
@@ -33,7 +33,6 @@
#include "src/base/platform/platform.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
-#include "src/serialize.h"
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 64d995d95a..cfc971770d 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -41,10 +41,10 @@
#include "src/full-codegen.h"
#include "src/global-handles.h"
-#include "src/snapshot.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
+using v8::Just;
TEST(MarkingDeque) {
@@ -127,6 +127,7 @@ TEST(NoPromotion) {
TEST(MarkCompactCollector) {
FLAG_incremental_marking = false;
+ FLAG_retain_maps_for_n_gc = 0;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
TestHeap* heap = CcTest::test_heap();
@@ -167,9 +168,7 @@ TEST(MarkCompactCollector) {
{ HandleScope scope(isolate);
Handle<String> func_name = factory->InternalizeUtf8String("theFunction");
- v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, func_name);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(global, func_name));
Handle<Object> func_value =
Object::GetProperty(global, func_name).ToHandleChecked();
CHECK(func_value->IsJSFunction());
@@ -187,9 +186,7 @@ TEST(MarkCompactCollector) {
{ HandleScope scope(isolate);
Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
- v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, obj_name);
- CHECK(maybe.has_value);
- CHECK(maybe.value);
+ CHECK(Just(true) == JSReceiver::HasOwnProperty(global, obj_name));
Handle<Object> object =
Object::GetProperty(global, obj_name).ToHandleChecked();
CHECK(object->IsJSObject());
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index 4c85151b88..391c934475 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -58,9 +58,26 @@ TEST(Regress340063) {
if (!i::FLAG_allocation_site_pretenuring) return;
v8::HandleScope scope(CcTest::isolate());
+ SetUpNewSpaceWithPoisonedMementoAtTop();
+
+ // Call GC to see if we can handle a poisonous memento right after the
+ // current new space top pointer.
+ CcTest::i_isolate()->heap()->CollectAllGarbage(
+ Heap::kAbortIncrementalMarkingMask);
+}
+
+
+TEST(Regress470390) {
+ CcTest::InitializeVM();
+ if (!i::FLAG_allocation_site_pretenuring) return;
+ v8::HandleScope scope(CcTest::isolate());
SetUpNewSpaceWithPoisonedMementoAtTop();
+ // Set the new space limit to be equal to the top.
+ Address top = CcTest::i_isolate()->heap()->new_space()->top();
+ *(CcTest::i_isolate()->heap()->new_space()->allocation_limit_address()) = top;
+
// Call GC to see if we can handle a poisonous memento right after the
// current new space top pointer.
CcTest::i_isolate()->heap()->CollectAllGarbage(
diff --git a/deps/v8/test/cctest/test-migrations.cc b/deps/v8/test/cctest/test-migrations.cc
index 2f7ff8703c..3be173453a 100644
--- a/deps/v8/test/cctest/test-migrations.cc
+++ b/deps/v8/test/cctest/test-migrations.cc
@@ -145,6 +145,7 @@ class Expectations {
os << ": " << representations_[i].Mnemonic();
os << ", attrs: " << attributes_[i] << ")\n";
}
+ os << "\n";
}
Handle<HeapType> GetFieldType(int index) {
@@ -232,12 +233,14 @@ class Expectations {
representations_[descriptor])) {
return false;
}
- Object* expected_value = *values_[descriptor];
Object* value = descriptors->GetValue(descriptor);
+ Object* expected_value = *values_[descriptor];
switch (type) {
case DATA:
- case ACCESSOR:
- return HeapType::cast(expected_value)->Equals(HeapType::cast(value));
+ case ACCESSOR: {
+ HeapType* type = descriptors->GetFieldType(descriptor);
+ return HeapType::cast(expected_value)->Equals(type);
+ }
case DATA_CONSTANT:
return value == expected_value;
@@ -263,6 +266,9 @@ class Expectations {
for (int i = 0; i < expected_nof; i++) {
if (!Check(descriptors, i)) {
Print();
+#ifdef OBJECT_PRINT
+ descriptors->Print();
+#endif
Check(descriptors, i);
return false;
}
@@ -336,9 +342,10 @@ class Expectations {
SetDataField(property_index, attributes, representation, heap_type);
Handle<String> name = MakeName("prop", property_index);
- int t = map->SearchTransition(kData, *name, attributes);
- CHECK_NE(TransitionArray::kNotFound, t);
- return handle(map->GetTransition(t));
+ Map* target =
+ TransitionArray::SearchTransition(*map, kData, *name, attributes);
+ CHECK(target != NULL);
+ return handle(target);
}
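
Transition lookup changes shape the same way here and in the hunk at the end of this file: the index-based pair SearchTransition()/GetTransition() on Map is replaced by a TransitionArray static that returns the target map directly, or NULL when absent:

    // Old: int t = map->SearchTransition(kData, *name, attributes);
    //      Handle<Map> target(map->GetTransition(t));
    Map* target = TransitionArray::SearchTransition(*map, kData, *name, attributes);
    CHECK(target != NULL);
    Handle<Map> target_handle = handle(target);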
Handle<Map> AddAccessorConstant(Handle<Map> map,
@@ -517,21 +524,51 @@ TEST(ReconfigureAccessorToNonExistingDataFieldHeavy) {
// A set of tests for representation generalization case.
//
-static void TestGeneralizeRepresentation(Representation from_representation,
- Handle<HeapType> from_type,
- Representation to_representation,
- Handle<HeapType> to_type,
- Representation expected_representation,
- Handle<HeapType> expected_type) {
+// This test ensures that representation/field type generalization at
+// |property_index| is done correctly independently of the fact that the |map|
+// is detached from transition tree or not.
+//
+// {} - p0 - p1 - p2: |detach_point_map|
+// |
+// X - detached at |detach_property_at_index|
+// |
+// + - p3 - p4: |map|
+//
+// Detaching does not happen if |detach_property_at_index| is -1.
+//
+static void TestGeneralizeRepresentation(
+ int detach_property_at_index, int property_index,
+ Representation from_representation, Handle<HeapType> from_type,
+ Representation to_representation, Handle<HeapType> to_type,
+ Representation expected_representation, Handle<HeapType> expected_type,
+ bool expected_deprecation, bool expected_field_type_dependency) {
Isolate* isolate = CcTest::i_isolate();
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ CHECK(detach_property_at_index >= -1 &&
+ detach_property_at_index < kPropCount);
+ CHECK(property_index < kPropCount);
+ CHECK_NE(detach_property_at_index, property_index);
+
+ const bool is_detached_map = detach_property_at_index >= 0;
Expectations expectations(isolate);
// Create a map, add required properties to it and initialize expectations.
Handle<Map> initial_map = Map::Create(isolate, 0);
Handle<Map> map = initial_map;
+ Handle<Map> detach_point_map;
for (int i = 0; i < kPropCount; i++) {
- map = expectations.AddDataField(map, NONE, from_representation, from_type);
+ if (i == property_index) {
+ map =
+ expectations.AddDataField(map, NONE, from_representation, from_type);
+ } else {
+ map =
+ expectations.AddDataField(map, NONE, Representation::Smi(), any_type);
+ if (i == detach_property_at_index) {
+ detach_point_map = map;
+ }
+ }
}
CHECK(!map->is_deprecated());
CHECK(map->is_stable());
@@ -540,97 +577,121 @@ static void TestGeneralizeRepresentation(Representation from_representation,
Zone zone;
FakeStubForTesting stub(isolate);
+ if (is_detached_map) {
+ detach_point_map = Map::ReconfigureProperty(
+ detach_point_map, detach_property_at_index, kData, NONE,
+ Representation::Tagged(), any_type, FORCE_FIELD);
+ expectations.SetDataField(detach_property_at_index,
+ Representation::Tagged(), any_type);
+ CHECK(map->is_deprecated());
+ CHECK(expectations.Check(*detach_point_map,
+ detach_point_map->NumberOfOwnDescriptors()));
+ }
+
// Create new maps by generalizing representation of propX field.
- Handle<Map> maps[kPropCount];
- for (int i = 0; i < kPropCount; i++) {
- Handle<Map> field_owner(map->FindFieldOwner(i), isolate);
- CompilationInfo info(&stub, isolate, &zone);
- CHECK(!info.HasAbortedDueToDependencyChange());
+ Handle<Map> field_owner(map->FindFieldOwner(property_index), isolate);
+ CompilationInfo info(&stub, isolate, &zone);
+ CHECK(!info.HasAbortedDueToDependencyChange());
+
+ Map::AddDependentCompilationInfo(field_owner, DependentCode::kFieldTypeGroup,
+ &info);
- Map::AddDependentCompilationInfo(field_owner,
- DependentCode::kFieldTypeGroup, &info);
+ Handle<Map> new_map =
+ Map::ReconfigureProperty(map, property_index, kData, NONE,
+ to_representation, to_type, FORCE_FIELD);
- Handle<Map> new_map = Map::ReconfigureProperty(
- map, i, kData, NONE, to_representation, to_type, FORCE_FIELD);
- maps[i] = new_map;
+ expectations.SetDataField(property_index, expected_representation,
+ expected_type);
- expectations.SetDataField(i, expected_representation, expected_type);
+ CHECK(!new_map->is_deprecated());
+ CHECK(expectations.Check(*new_map));
+ if (is_detached_map) {
CHECK(map->is_deprecated());
- CHECK(!info.HasAbortedDueToDependencyChange());
- info.RollbackDependencies(); // Properly cleanup compilation info.
+ CHECK_NE(*map, *new_map);
+ CHECK_EQ(expected_field_type_dependency && !field_owner->is_deprecated(),
+ info.HasAbortedDueToDependencyChange());
+ } else if (expected_deprecation) {
+ CHECK(map->is_deprecated());
+ CHECK(field_owner->is_deprecated());
CHECK_NE(*map, *new_map);
- CHECK(i == 0 || maps[i - 1]->is_deprecated());
+ CHECK(!info.HasAbortedDueToDependencyChange());
- CHECK(!new_map->is_deprecated());
- CHECK(!new_map->is_dictionary_map());
- CHECK(expectations.Check(*new_map));
+ } else {
+ CHECK(!field_owner->is_deprecated());
+ CHECK_EQ(*map, *new_map);
+
+ CHECK_EQ(expected_field_type_dependency,
+ info.HasAbortedDueToDependencyChange());
}
- Handle<Map> active_map = maps[kPropCount - 1];
- CHECK(!active_map->is_deprecated());
+ info.RollbackDependencies(); // Properly cleanup compilation info.
// Update all deprecated maps and check that they are now the same.
Handle<Map> updated_map = Map::Update(map);
- CHECK_EQ(*active_map, *updated_map);
- for (int i = 0; i < kPropCount; i++) {
- updated_map = Map::Update(maps[i]);
- CHECK_EQ(*active_map, *updated_map);
- }
+ CHECK_EQ(*new_map, *updated_map);
}
-static void TestGeneralizeRepresentationTrivial(
+static void TestGeneralizeRepresentation(
Representation from_representation, Handle<HeapType> from_type,
Representation to_representation, Handle<HeapType> to_type,
Representation expected_representation, Handle<HeapType> expected_type,
- bool expected_field_type_dependency = true) {
- Isolate* isolate = CcTest::i_isolate();
-
- Expectations expectations(isolate);
-
- // Create a map, add required properties to it and initialize expectations.
- Handle<Map> initial_map = Map::Create(isolate, 0);
- Handle<Map> map = initial_map;
- for (int i = 0; i < kPropCount; i++) {
- map = expectations.AddDataField(map, NONE, from_representation, from_type);
+ bool expected_deprecation, bool expected_field_type_dependency) {
+ // Check the cases when the map being reconfigured is a part of the
+ // transition tree.
+ STATIC_ASSERT(kPropCount > 4);
+ int indices[] = {0, 2, kPropCount - 1};
+ for (int i = 0; i < static_cast<int>(arraysize(indices)); i++) {
+ TestGeneralizeRepresentation(
+ -1, indices[i], from_representation, from_type, to_representation,
+ to_type, expected_representation, expected_type, expected_deprecation,
+ expected_field_type_dependency);
+ }
+
+ if (!from_representation.IsNone()) {
+ // Check the cases when the map being reconfigured is NOT a part of the
+ // transition tree. "None -> anything" representation changes make sense
+ // only for "attached" maps.
+ int indices[] = {0, kPropCount - 1};
+ for (int i = 0; i < static_cast<int>(arraysize(indices)); i++) {
+ TestGeneralizeRepresentation(
+ indices[i], 2, from_representation, from_type, to_representation,
+ to_type, expected_representation, expected_type, expected_deprecation,
+ expected_field_type_dependency);
+ }
}
- CHECK(!map->is_deprecated());
- CHECK(map->is_stable());
- CHECK(expectations.Check(*map));
-
- Zone zone;
- FakeStubForTesting stub(isolate);
-
- // Create new maps by generalizing representation of propX field.
- for (int i = 0; i < kPropCount; i++) {
- Handle<Map> field_owner(map->FindFieldOwner(i), isolate);
- CompilationInfo info(&stub, isolate, &zone);
- CHECK(!info.HasAbortedDueToDependencyChange());
-
- Map::AddDependentCompilationInfo(field_owner,
- DependentCode::kFieldTypeGroup, &info);
+}
- Handle<Map> new_map = Map::ReconfigureProperty(
- map, i, kData, NONE, to_representation, to_type, FORCE_FIELD);
- expectations.SetDataField(i, expected_representation, expected_type);
+static void TestGeneralizeRepresentation(Representation from_representation,
+ Handle<HeapType> from_type,
+ Representation to_representation,
+ Handle<HeapType> to_type,
+ Representation expected_representation,
+ Handle<HeapType> expected_type) {
+ const bool expected_deprecation = true;
+ const bool expected_field_type_dependency = false;
- CHECK_EQ(*map, *new_map);
- CHECK_EQ(expected_field_type_dependency,
- info.HasAbortedDueToDependencyChange());
+ TestGeneralizeRepresentation(
+ from_representation, from_type, to_representation, to_type,
+ expected_representation, expected_type, expected_deprecation,
+ expected_field_type_dependency);
+}
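A typical call through this convenience overload, in the style of the Smi-to-Double generalization tests elsewhere in this file (any_type assumed to be HeapType::Any(isolate)):

Handle<HeapType> any_type = HeapType::Any(isolate);
TestGeneralizeRepresentation(Representation::Smi(), any_type,
                             Representation::Double(), any_type,
                             Representation::Double(), any_type);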
- info.RollbackDependencies(); // Properly cleanup compilation info.
- CHECK_EQ(*map, *new_map);
- CHECK(!new_map->is_deprecated());
- CHECK(!new_map->is_dictionary_map());
- CHECK(expectations.Check(*new_map));
- }
+static void TestGeneralizeRepresentationTrivial(
+ Representation from_representation, Handle<HeapType> from_type,
+ Representation to_representation, Handle<HeapType> to_type,
+ Representation expected_representation, Handle<HeapType> expected_type,
+ bool expected_field_type_dependency = true) {
+ const bool expected_deprecation = false;
- Handle<Map> updated_map = Map::Update(map);
- CHECK_EQ(*map, *updated_map);
+ TestGeneralizeRepresentation(
+ from_representation, from_type, to_representation, to_type,
+ expected_representation, expected_type, expected_deprecation,
+ expected_field_type_dependency);
}
@@ -828,7 +889,6 @@ TEST(GeneralizeRepresentationWithAccessorProperties) {
CHECK(i == 0 || maps[i - 1]->is_deprecated());
CHECK(!new_map->is_deprecated());
- CHECK(!new_map->is_dictionary_map());
CHECK(expectations.Check(*new_map));
}
@@ -925,7 +985,6 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentation(
CHECK_NE(*map, *new_map);
CHECK(!new_map->is_deprecated());
- CHECK(!new_map->is_dictionary_map());
CHECK(expectations.Check(*new_map));
// Update deprecated |map|, it should become |new_map|.
@@ -1016,7 +1075,6 @@ static void TestReconfigureDataFieldAttribute_GeneralizeRepresentationTrivial(
info.RollbackDependencies(); // Properly clean up compilation info.
CHECK(!new_map->is_deprecated());
- CHECK(!new_map->is_dictionary_map());
CHECK(expectations.Check(*new_map));
Handle<Map> updated_map = Map::Update(map);
@@ -1121,7 +1179,6 @@ struct CheckDeprecated {
CHECK_NE(*map, *new_map);
CHECK(!new_map->is_deprecated());
- CHECK(!new_map->is_dictionary_map());
CHECK(expectations.Check(*new_map));
// Update deprecated |map|, it should become |new_map|.
@@ -1140,7 +1197,6 @@ struct CheckSameMap {
CHECK_EQ(*map, *new_map);
CHECK(!new_map->is_deprecated());
- CHECK(!new_map->is_dictionary_map());
CHECK(expectations.Check(*new_map));
// Update deprecated |map|, it should become |new_map|.
@@ -1163,7 +1219,6 @@ struct CheckCopyGeneralizeAllRepresentations {
}
CHECK(!new_map->is_deprecated());
- CHECK(!new_map->is_dictionary_map());
CHECK(expectations.Check(*new_map));
}
};
@@ -1423,9 +1478,10 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
}
Handle<String> name = MakeName("prop", i);
- int t = map2->SearchTransition(kData, *name, NONE);
- CHECK_NE(TransitionArray::kNotFound, t);
- map2 = handle(map2->GetTransition(t));
+ Map* target =
+ TransitionArray::SearchTransition(*map2, kData, *name, NONE);
+ CHECK(target != NULL);
+ map2 = handle(target);
}
map2 = Map::ReconfigureProperty(map2, kSplitProp, kData, NONE,
@@ -1445,12 +1501,12 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) {
// Fill in transition tree of |map2| so that it can't have more transitions.
for (int i = 0; i < TransitionArray::kMaxNumberOfTransitions; i++) {
- CHECK(map2->CanHaveMoreTransitions());
+ CHECK(TransitionArray::CanHaveMoreTransitions(map2));
Handle<String> name = MakeName("foo", i);
Map::CopyWithField(map2, name, any_type, NONE, Representation::Smi(),
INSERT_TRANSITION).ToHandleChecked();
}
- CHECK(!map2->CanHaveMoreTransitions());
+ CHECK(!TransitionArray::CanHaveMoreTransitions(map2));
// Try to update |map|; since there is no room for a propX transition at
// |map2|, |map| should become "copy-generalized".
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 1a17475ada..5b19746f93 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -72,7 +72,6 @@ TEST(ScanKeywords) {
i::Utf8ToUtf16CharacterStream stream(keyword, length);
i::Scanner scanner(&unicode_cache);
// The scanner should parse Harmony keywords for this test.
- scanner.SetHarmonyScoping(true);
scanner.SetHarmonyModules(true);
scanner.SetHarmonyClasses(true);
scanner.Initialize(&stream);
@@ -1045,15 +1044,14 @@ TEST(ScopeUsesArgumentsSuperThis) {
factory->NewStringFromUtf8(i::CStrVector(program.start()))
.ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(), isolate->unicode_cache());
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
parser.set_allow_harmony_arrow_functions(true);
parser.set_allow_harmony_classes(true);
parser.set_allow_harmony_object_literals(true);
- parser.set_allow_harmony_scoping(true);
parser.set_allow_harmony_sloppy(true);
- info.MarkAsGlobal();
+ info.set_global();
CHECK(parser.Parse(&info));
CHECK(i::Rewriter::Rewrite(&info));
CHECK(i::Scope::Analyze(&info));
@@ -1086,8 +1084,6 @@ TEST(ScopeUsesArgumentsSuperThis) {
TEST(ScopePositions) {
- v8::internal::FLAG_harmony_scoping = true;
-
// Test the parser for correctly setting the start and end positions
// of a scope. We check the scope positions of exactly one scope
// nested in the global scope of a program. 'inner source' is the
@@ -1297,14 +1293,13 @@ TEST(ScopePositions) {
i::CStrVector(program.start())).ToHandleChecked();
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = factory->NewScript(source);
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(), isolate->unicode_cache());
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
parser.set_allow_lazy(true);
- parser.set_allow_harmony_scoping(true);
parser.set_allow_harmony_arrow_functions(true);
- info.MarkAsGlobal();
- info.SetLanguageMode(source_data[i].language_mode);
+ info.set_global();
+ info.set_language_mode(source_data[i].language_mode);
parser.Parse(&info);
CHECK(info.function() != NULL);
@@ -1377,14 +1372,12 @@ i::Handle<i::String> FormatMessage(i::Vector<unsigned> data) {
enum ParserFlag {
kAllowLazy,
kAllowNatives,
- kAllowHarmonyScoping,
kAllowHarmonyModules,
kAllowHarmonyNumericLiterals,
kAllowHarmonyArrowFunctions,
kAllowHarmonyClasses,
kAllowHarmonyObjectLiterals,
kAllowHarmonyRestParameters,
- kAllowHarmonyTemplates,
kAllowHarmonySloppy,
kAllowHarmonyUnicode,
kAllowHarmonyComputedPropertyNames,
@@ -1403,7 +1396,6 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
i::EnumSet<ParserFlag> flags) {
parser->set_allow_lazy(flags.Contains(kAllowLazy));
parser->set_allow_natives(flags.Contains(kAllowNatives));
- parser->set_allow_harmony_scoping(flags.Contains(kAllowHarmonyScoping));
parser->set_allow_harmony_modules(flags.Contains(kAllowHarmonyModules));
parser->set_allow_harmony_numeric_literals(
flags.Contains(kAllowHarmonyNumericLiterals));
@@ -1412,7 +1404,6 @@ void SetParserFlags(i::ParserBase<Traits>* parser,
parser->set_allow_harmony_arrow_functions(
flags.Contains(kAllowHarmonyArrowFunctions));
parser->set_allow_harmony_classes(flags.Contains(kAllowHarmonyClasses));
- parser->set_allow_harmony_templates(flags.Contains(kAllowHarmonyTemplates));
parser->set_allow_harmony_rest_params(
flags.Contains(kAllowHarmonyRestParameters));
parser->set_allow_harmony_sloppy(flags.Contains(kAllowHarmonySloppy));
@@ -1456,11 +1447,11 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
i::FunctionLiteral* function;
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(), isolate->unicode_cache());
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
SetParserFlags(&parser, flags);
- info.MarkAsGlobal();
+ info.set_global();
parser.Parse(&info);
function = info.function();
if (function) {
@@ -1973,8 +1964,8 @@ TEST(NoErrorsFutureStrictReservedWords) {
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
- static const ParserFlag classes_flags[] = {
- kAllowHarmonyArrowFunctions, kAllowHarmonyClasses, kAllowHarmonyScoping};
+ static const ParserFlag classes_flags[] = {kAllowHarmonyArrowFunctions,
+ kAllowHarmonyClasses};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
classes_flags, arraysize(classes_flags));
}
@@ -2544,10 +2535,13 @@ TEST(DontRegressPreParserDataSizes) {
i::Handle<i::String> source =
factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
i::Handle<i::Script> script = factory->NewScript(source);
- i::CompilationInfoWithZone info(script);
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
i::ScriptData* sd = NULL;
- info.SetCachedData(&sd, v8::ScriptCompiler::kProduceParserCache);
- i::Parser::ParseStatic(&info, true);
+ info.set_cached_data(&sd);
+ info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
+ info.set_allow_lazy_parsing();
+ i::Parser::ParseStatic(&info);
i::ParseData* pd = i::ParseData::FromCachedData(sd);
if (pd->FunctionCount() != test_cases[i].functions) {
@@ -2961,6 +2955,36 @@ TEST(StrictDelete) {
}
+TEST(NoErrorsDeclsInCase) {
+ const char* context_data[][2] = {
+ {"'use strict'; switch(x) { case 1:", "}"},
+ {"function foo() {'use strict'; switch(x) { case 1:", "}}"},
+ {"'use strict'; switch(x) { case 1: case 2:", "}"},
+ {"function foo() {'use strict'; switch(x) { case 1: case 2:", "}}"},
+ {"'use strict'; switch(x) { default:", "}"},
+ {"function foo() {'use strict'; switch(x) { default:", "}}"},
+ {"'use strict'; switch(x) { case 1: default:", "}"},
+ {"function foo() {'use strict'; switch(x) { case 1: default:", "}}"},
+ { nullptr, nullptr }
+ };
+
+ const char* statement_data[] = {
+ "function f() { }",
+ "class C { }",
+ "class C extends Q {}",
+ "function f() { } class C {}",
+ "function f() { }; class C {}",
+ "class C {}; function f() {}",
+ nullptr
+ };
+
+ static const ParserFlag always_flags[] = {kAllowHarmonyClasses};
+
+ RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+ always_flags, arraysize(always_flags));
+}
+
+
TEST(InvalidLeftHandSide) {
const char* assignment_context_data[][2] = {
{"", " = 1;"},
@@ -3246,70 +3270,6 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
}
-TEST(ExportsMaybeAssigned) {
- i::FLAG_use_strict = true;
- i::FLAG_harmony_scoping = true;
- i::FLAG_harmony_modules = true;
-
- i::Isolate* isolate = CcTest::i_isolate();
- i::Factory* factory = isolate->factory();
- i::HandleScope scope(isolate);
- LocalContext env;
-
- const char* src =
- "module A {"
- " export var x = 1;"
- " export function f() { return x };"
- " export const y = 2;"
- " module B {}"
- " export module C {}"
- "};"
- "A.f";
-
- i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
- i::SNPrintF(program, "%s", src);
- i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
- source->PrintOn(stdout);
- printf("\n");
- i::Zone zone;
- v8::Local<v8::Value> v = CompileRun(src);
- i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
- i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
- i::Context* context = f->context();
- i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
- avf.Internalize(isolate);
-
- i::Scope* script_scope =
- new (&zone) i::Scope(&zone, NULL, i::SCRIPT_SCOPE, &avf);
- script_scope->Initialize();
- i::Scope* s =
- i::Scope::DeserializeScopeChain(isolate, &zone, context, script_scope);
- DCHECK(s != script_scope);
- const i::AstRawString* name_x = avf.GetOneByteString("x");
- const i::AstRawString* name_f = avf.GetOneByteString("f");
- const i::AstRawString* name_y = avf.GetOneByteString("y");
- const i::AstRawString* name_B = avf.GetOneByteString("B");
- const i::AstRawString* name_C = avf.GetOneByteString("C");
-
- // Get result from h's function context (that is f's context)
- i::Variable* var_x = s->Lookup(name_x);
- CHECK(var_x != NULL);
- CHECK(var_x->maybe_assigned() == i::kMaybeAssigned);
- i::Variable* var_f = s->Lookup(name_f);
- CHECK(var_f != NULL);
- CHECK(var_f->maybe_assigned() == i::kMaybeAssigned);
- i::Variable* var_y = s->Lookup(name_y);
- CHECK(var_y != NULL);
- CHECK(var_y->maybe_assigned() == i::kNotAssigned);
- i::Variable* var_B = s->Lookup(name_B);
- CHECK(var_B != NULL);
- CHECK(var_B->maybe_assigned() == i::kNotAssigned);
- i::Variable* var_C = s->Lookup(name_C);
- CHECK(var_C != NULL);
- CHECK(var_C->maybe_assigned() == i::kNotAssigned);
-}
-
-
TEST(InnerAssignment) {
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
@@ -3431,11 +3391,9 @@ TEST(InnerAssignment) {
printf("\n");
i::Handle<i::Script> script = factory->NewScript(source);
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(),
- isolate->unicode_cache());
- parser.set_allow_harmony_scoping(true);
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
CHECK(parser.Parse(&info));
CHECK(i::Compiler::Analyze(&info));
CHECK(info.function() != NULL);
@@ -3583,7 +3541,7 @@ TEST(ErrorsArrowFunctions) {
};
// The test is quite slow, so run it with a reduced set of flags.
- static const ParserFlag flags[] = {kAllowLazy, kAllowHarmonyScoping};
+ static const ParserFlag flags[] = {kAllowLazy};
static const ParserFlag always_flags[] = { kAllowHarmonyArrowFunctions };
RunParserSyncTest(context_data, statement_data, kError, flags,
arraysize(flags), always_flags, arraysize(always_flags));
@@ -4111,8 +4069,7 @@ TEST(ClassDeclarationNoErrors) {
"class name extends class base {} {}",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonyClasses, kAllowHarmonyScoping};
+ static const ParserFlag always_flags[] = {kAllowHarmonyClasses};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
}
@@ -4631,9 +4588,7 @@ TEST(ConstParsingInForIn) {
"for(const x in [1,2,3]) {}",
"for(const x of [1,2,3]) {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyScoping};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess, nullptr, 0, nullptr, 0);
}
@@ -4653,9 +4608,7 @@ TEST(ConstParsingInForInError) {
"for(const x = 1, y = 2 of []) {}",
"for(const x,y of []) {}",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyScoping};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError, nullptr, 0, nullptr, 0);
}
@@ -4749,9 +4702,7 @@ TEST(ScanTemplateLiterals) {
"`foo${\r a}`",
"`foo${'a' in a}`",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, NULL, 0);
}
@@ -4786,9 +4737,7 @@ TEST(ScanTaggedTemplateLiterals) {
"tag`foo${\r a}`",
"tag`foo${'a' in a}`",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, NULL, 0);
}
@@ -4815,9 +4764,7 @@ TEST(TemplateMaterializedLiterals) {
NULL
};
- static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
- RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kSuccess, NULL, 0, NULL, 0);
}
@@ -4851,9 +4798,7 @@ TEST(ScanUnterminatedTemplateLiterals) {
"`foo${fn(}`",
"`foo${1 if}`",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0);
}
@@ -4873,9 +4818,7 @@ TEST(TemplateLiteralsIllegalTokens) {
"`hello${1}\\x\n${2}`",
NULL};
- static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
- RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
- arraysize(always_flags));
+ RunParserSyncTest(context_data, data, kError, NULL, 0, NULL, 0);
}
@@ -5010,8 +4953,7 @@ TEST(LexicalScopingSloppyMode) {
"(class C {})",
"(class C extends D {})",
NULL};
- static const ParserFlag always_true_flags[] = {
- kAllowHarmonyScoping, kAllowHarmonyClasses};
+ static const ParserFlag always_true_flags[] = {kAllowHarmonyClasses};
static const ParserFlag always_false_flags[] = {kAllowHarmonySloppy};
RunParserSyncTest(context_data, bad_data, kError, NULL, 0,
always_true_flags, arraysize(always_true_flags),
@@ -5108,6 +5050,7 @@ TEST(BasicImportExportParsing) {
"export { yield } from 'm.js'",
"export { static } from 'm.js'",
"export { let } from 'm.js'",
+ "var a; export { a as b, a as c };",
"import 'somemodule.js';",
"import { } from 'm.js';",
@@ -5135,23 +5078,18 @@ TEST(BasicImportExportParsing) {
128 * 1024);
for (unsigned i = 0; i < arraysize(kSources); ++i) {
- int kProgramByteSize = i::StrLength(kSources[i]);
- i::ScopedVector<char> program(kProgramByteSize + 1);
- i::SNPrintF(program, "%s", kSources[i]);
i::Handle<i::String> source =
- factory->NewStringFromUtf8(i::CStrVector(program.start()))
- .ToHandleChecked();
+ factory->NewStringFromAsciiChecked(kSources[i]);
// Show that parsing as a module works
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(), isolate->unicode_cache());
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
parser.set_allow_harmony_classes(true);
parser.set_allow_harmony_modules(true);
- parser.set_allow_harmony_scoping(true);
- info.MarkAsModule();
+ info.set_module();
if (!parser.Parse(&info)) {
i::Handle<i::JSObject> exception_handle(
i::JSObject::cast(isolate->pending_exception()));
@@ -5173,13 +5111,12 @@ TEST(BasicImportExportParsing) {
// And that parsing a script does not.
{
i::Handle<i::Script> script = factory->NewScript(source);
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(), isolate->unicode_cache());
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
parser.set_allow_harmony_classes(true);
parser.set_allow_harmony_modules(true);
- parser.set_allow_harmony_scoping(true);
- info.MarkAsGlobal();
+ info.set_global();
CHECK(!parser.Parse(&info));
}
}
@@ -5211,6 +5148,10 @@ TEST(ImportExportParsingErrors) {
"export { arguments }",
"export { arguments as foo }",
"var a; export { a, a };",
+ "var a, b; export { a as b, b };",
+ "var a, b; export { a as c, b as c };",
+ "export default function f(){}; export default class C {};",
+ "export default function f(){}; var a; export { a as default };",
"import from;",
"import from 'm.js';",
@@ -5256,26 +5197,81 @@ TEST(ImportExportParsingErrors) {
128 * 1024);
for (unsigned i = 0; i < arraysize(kErrorSources); ++i) {
- int kProgramByteSize = i::StrLength(kErrorSources[i]);
- i::ScopedVector<char> program(kProgramByteSize + 1);
- i::SNPrintF(program, "%s", kErrorSources[i]);
i::Handle<i::String> source =
- factory->NewStringFromUtf8(i::CStrVector(program.start()))
- .ToHandleChecked();
+ factory->NewStringFromAsciiChecked(kErrorSources[i]);
i::Handle<i::Script> script = factory->NewScript(source);
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(), isolate->unicode_cache());
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
parser.set_allow_harmony_classes(true);
parser.set_allow_harmony_modules(true);
- parser.set_allow_harmony_scoping(true);
- info.MarkAsModule();
+ info.set_module();
CHECK(!parser.Parse(&info));
}
}
+TEST(ModuleParsingInternals) {
+ i::FLAG_harmony_modules = true;
+
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::Factory* factory = isolate->factory();
+ v8::HandleScope handles(CcTest::isolate());
+ v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
+ v8::Context::Scope context_scope(context);
+ isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+ 128 * 1024);
+
+ static const char kSource[] =
+ "let x = 5;"
+ "export { x as y };"
+ "import { q as z } from 'm.js';"
+ "import n from 'n.js'";
+ i::Handle<i::String> source = factory->NewStringFromAsciiChecked(kSource);
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
+ i::Parser parser(&info);
+ parser.set_allow_harmony_modules(true);
+ info.set_module();
+ CHECK(parser.Parse(&info));
+ CHECK(i::Compiler::Analyze(&info));
+
+ i::FunctionLiteral* func = info.function();
+ i::Scope* module_scope = func->scope();
+ i::Scope* outer_scope = module_scope->outer_scope();
+ CHECK(outer_scope->is_script_scope());
+ CHECK_NULL(outer_scope->outer_scope());
+ CHECK_EQ(1, outer_scope->num_modules());
+ CHECK(module_scope->is_module_scope());
+ CHECK_NOT_NULL(module_scope->module_var());
+ CHECK_EQ(i::INTERNAL, module_scope->module_var()->mode());
+
+ i::ModuleDescriptor* descriptor = module_scope->module();
+ CHECK_NOT_NULL(descriptor);
+ CHECK_EQ(1, descriptor->Length());
+ const i::AstRawString* export_name = avf.GetOneByteString("y");
+ const i::AstRawString* local_name =
+ descriptor->LookupLocalExport(export_name, &zone);
+ CHECK_NOT_NULL(local_name);
+ CHECK(local_name->IsOneByteEqualTo("x"));
+ i::ZoneList<i::Declaration*>* declarations = module_scope->declarations();
+ CHECK_EQ(3, declarations->length());
+ CHECK(declarations->at(0)->proxy()->raw_name()->IsOneByteEqualTo("x"));
+ i::ImportDeclaration* import_decl =
+ declarations->at(1)->AsImportDeclaration();
+ CHECK(import_decl->import_name()->IsOneByteEqualTo("q"));
+ CHECK(import_decl->proxy()->raw_name()->IsOneByteEqualTo("z"));
+ CHECK(import_decl->module_specifier()->IsOneByteEqualTo("m.js"));
+ import_decl = declarations->at(2)->AsImportDeclaration();
+ CHECK(import_decl->import_name()->IsOneByteEqualTo("default"));
+ CHECK(import_decl->proxy()->raw_name()->IsOneByteEqualTo("n"));
+ CHECK(import_decl->module_specifier()->IsOneByteEqualTo("n.js"));
+}
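A natural companion assertion, assuming LookupLocalExport returns NULL for names that were never exported (a hypothetical extra check reusing the avf and descriptor above):

// "q" is only imported (as z), never exported, so the lookup should fail.
const i::AstRawString* not_exported = avf.GetOneByteString("q");
CHECK_NULL(descriptor->LookupLocalExport(not_exported, &zone));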
+
+
TEST(DuplicateProtoError) {
const char* context_data[][2] = {
{"({", "});"},
@@ -5339,8 +5335,8 @@ TEST(DeclarationsError) {
"class C {}",
NULL};
- static const ParserFlag always_flags[] = {
- kAllowHarmonyClasses, kAllowHarmonyScoping, kAllowStrongMode};
+ static const ParserFlag always_flags[] = {kAllowHarmonyClasses,
+ kAllowStrongMode};
RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
always_flags, arraysize(always_flags));
}
@@ -5358,11 +5354,11 @@ void TestLanguageMode(const char* source,
i::Handle<i::Script> script =
factory->NewScript(factory->NewStringFromAsciiChecked(source));
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, isolate->stack_guard()->real_climit(),
- isolate->heap()->HashSeed(), isolate->unicode_cache());
+ i::Zone zone;
+ i::ParseInfo info(&zone, script);
+ i::Parser parser(&info);
parser.set_allow_strong_mode(true);
- info.MarkAsGlobal();
+ info.set_global();
parser.Parse(&info);
CHECK(info.function() != NULL);
CHECK_EQ(expected_language_mode, info.function()->language_mode());
@@ -5425,8 +5421,7 @@ TEST(PropertyNameEvalArguments) {
NULL};
static const ParserFlag always_flags[] = {
- kAllowHarmonyClasses, kAllowHarmonyObjectLiterals, kAllowHarmonyScoping,
- kAllowStrongMode};
+ kAllowHarmonyClasses, kAllowHarmonyObjectLiterals, kAllowStrongMode};
RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
always_flags, arraysize(always_flags));
}
@@ -5498,8 +5493,7 @@ TEST(VarForbiddenInStrongMode) {
"const x = 0;",
NULL};
- static const ParserFlag always_flags[] = {kAllowStrongMode,
- kAllowHarmonyScoping};
+ static const ParserFlag always_flags[] = {kAllowStrongMode};
RunParserSyncTest(strong_context_data, var_declarations, kError, NULL, 0,
always_flags, arraysize(always_flags));
RunParserSyncTest(strong_context_data, let_declarations, kSuccess, NULL, 0,
@@ -5539,7 +5533,7 @@ TEST(StrongEmptySubStatements) {
NULL};
static const ParserFlag always_flags[] = {
- kAllowStrongMode, kAllowHarmonyScoping
+ kAllowStrongMode,
};
RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
arraysize(always_flags));
@@ -5561,7 +5555,7 @@ TEST(StrongForIn) {
NULL};
static const ParserFlag always_flags[] = {
- kAllowStrongMode, kAllowHarmonyScoping
+ kAllowStrongMode,
};
RunParserSyncTest(sloppy_context_data, data, kSuccess, NULL, 0, always_flags,
arraysize(always_flags));
@@ -5570,3 +5564,227 @@ TEST(StrongForIn) {
RunParserSyncTest(strong_context_data, data, kError, NULL, 0, always_flags,
arraysize(always_flags));
}
+
+
+TEST(StrongSuperCalls) {
+ const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
+ const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
+ const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
+
+ const char* data[] = {
+ "class C extends Object { constructor() {} }",
+ "class C extends Object { constructor() { (super()); } }",
+ "class C extends Object { constructor() { (() => super())(); } }",
+ "class C extends Object { constructor() { { super(); } } }",
+ "class C extends Object { constructor() { if (1) super(); } }",
+ "class C extends Object { constructor() { super(), super(); } }",
+ "class C extends Object { constructor() { super(); super(); } }",
+ "class C extends Object { constructor() { super(); (super()); } }",
+ "class C extends Object { constructor() { super(); { super() } } }",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowStrongMode, kAllowHarmonyClasses, kAllowHarmonyObjectLiterals,
+ kAllowHarmonyArrowFunctions
+ };
+ RunParserSyncTest(sloppy_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(strong_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(StrongConstructorReturns) {
+ const char* sloppy_context_data[][2] = {{"", ""}, {NULL}};
+ const char* strict_context_data[][2] = {{"'use strict';", ""}, {NULL}};
+ const char* strong_context_data[][2] = {{"'use strong';", ""}, {NULL}};
+
+ const char* data[] = {
+ "class C extends Object { constructor() { super(); return {}; } }",
+ "class C extends Object { constructor() { super(); { return {}; } } }",
+ "class C extends Object { constructor() { super(); if (1) return {}; } }",
+ "class C extends Object { constructor() { return; super(); } }",
+ "class C extends Object { constructor() { { return; } super(); } }",
+ "class C extends Object { constructor() { if (0) return; super(); } }",
+ NULL};
+
+ static const ParserFlag always_flags[] = {
+ kAllowStrongMode, kAllowHarmonyClasses, kAllowHarmonyObjectLiterals
+ };
+ RunParserSyncTest(sloppy_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(strict_context_data, data, kSuccess, NULL, 0, always_flags,
+ arraysize(always_flags));
+ RunParserSyncTest(strong_context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(ArrowFunctionASIErrors) {
+ const char* context_data[][2] = {{"'use strict';", ""}, {"", ""},
+ {NULL, NULL}};
+
+ const char* data[] = {
+ "(a\n=> a)(1)",
+ "(a/*\n*/=> a)(1)",
+ "((a)\n=> a)(1)",
+ "((a)/*\n*/=> a)(1)",
+ "((a, b)\n=> a + b)(1, 2)",
+ "((a, b)/*\n*/=> a + b)(1, 2)",
+ NULL};
+ static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions};
+ RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+ arraysize(always_flags));
+}
+
+
+TEST(StrongModeFreeVariablesDeclaredByPreviousScript) {
+ i::FLAG_strong_mode = true;
+ v8::V8::Initialize();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
+ v8::TryCatch try_catch;
+
+ // Introduce a bunch of variables in all language modes.
+ const char* script1 =
+ "var my_var1 = 0; \n"
+ "function my_func1() { } \n"
+ "const my_const1 = 0; \n";
+ CompileRun(v8_str(script1));
+ CHECK(!try_catch.HasCaught());
+
+ const char* script2 =
+ "\"use strict\"; \n"
+ "let my_var2 = 0; \n"
+ "function my_func2() { } \n"
+ "const my_const2 = 0 \n";
+ CompileRun(v8_str(script2));
+ CHECK(!try_catch.HasCaught());
+
+ const char* script3 =
+ "\"use strong\"; \n"
+ "let my_var3 = 0; \n"
+ "function my_func3() { } \n"
+ "const my_const3 = 0; \n";
+ CompileRun(v8_str(script3));
+ CHECK(!try_catch.HasCaught());
+
+ // Sloppy eval introduces variables in the surrounding scope.
+ const char* script4 =
+ "eval('var my_var4 = 0;') \n"
+ "eval('function my_func4() { }') \n"
+ "eval('const my_const4 = 0;') \n";
+ CompileRun(v8_str(script4));
+ CHECK(!try_catch.HasCaught());
+
+ // Test that referencing these variables works.
+ const char* script5 =
+ "\"use strong\"; \n"
+ "my_var1; \n"
+ "my_func1; \n"
+ "my_const1; \n"
+ "my_var2; \n"
+ "my_func2; \n"
+ "my_const2; \n"
+ "my_var3; \n"
+ "my_func3; \n"
+ "my_const3; \n"
+ "my_var4; \n"
+ "my_func4; \n"
+ "my_const4; \n";
+ CompileRun(v8_str(script5));
+ CHECK(!try_catch.HasCaught());
+}
+
+
+TEST(StrongModeFreeVariablesDeclaredByLanguage) {
+ i::FLAG_strong_mode = true;
+ v8::V8::Initialize();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
+ v8::TryCatch try_catch;
+
+ const char* script1 =
+ "\"use strong\"; \n"
+ "Math; \n"
+ "RegExp; \n";
+ CompileRun(v8_str(script1));
+ CHECK(!try_catch.HasCaught());
+}
+
+
+TEST(StrongModeFreeVariablesDeclaredInGlobalPrototype) {
+ i::FLAG_strong_mode = true;
+ v8::V8::Initialize();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
+ v8::TryCatch try_catch;
+
+ const char* script1 = "this.__proto__.my_var = 0;\n";
+ CompileRun(v8_str(script1));
+ CHECK(!try_catch.HasCaught());
+
+ const char* script2 =
+ "\"use strong\"; \n"
+ "my_var; \n";
+ CompileRun(v8_str(script2));
+ CHECK(!try_catch.HasCaught());
+}
+
+
+TEST(StrongModeFreeVariablesNotDeclared) {
+ i::FLAG_strong_mode = true;
+ v8::V8::Initialize();
+ v8::HandleScope scope(CcTest::isolate());
+ v8::Context::Scope context_scope(v8::Context::New(CcTest::isolate()));
+ v8::TryCatch try_catch;
+
+ // Test that referencing unintroduced variables in sloppy mode is ok.
+ const char* script1 =
+ "if (false) { \n"
+ " not_there1; \n"
+ "} \n";
+ CompileRun(v8_str(script1));
+ CHECK(!try_catch.HasCaught());
+
+ // But not in strong mode.
+ {
+ const char* script2 =
+ "\"use strong\"; \n"
+ "if (false) { \n"
+ " not_there2; \n"
+ "} \n";
+ v8::TryCatch try_catch2;
+ v8::Script::Compile(v8_str(script2));
+ CHECK(try_catch2.HasCaught());
+ v8::String::Utf8Value exception(try_catch2.Exception());
+ CHECK_EQ(0,
+ strcmp(
+ "ReferenceError: In strong mode, using an undeclared global "
+ "variable 'not_there2' is not allowed",
+ *exception));
+ }
+
+ // Check that the variable reference is detected inside a strong function too,
+ // even if the script scope is not strong.
+ {
+ const char* script3 =
+ "(function not_lazy() { \n"
+ " \"use strong\"; \n"
+ " if (false) { \n"
+ " not_there3; \n"
+ " } \n"
+ "})(); \n";
+ v8::TryCatch try_catch2;
+ v8::Script::Compile(v8_str(script3));
+ CHECK(try_catch2.HasCaught());
+ v8::String::Utf8Value exception(try_catch2.Exception());
+ CHECK_EQ(0,
+ strcmp(
+ "ReferenceError: In strong mode, using an undeclared global "
+ "variable 'not_there3' is not allowed",
+ *exception));
+ }
+}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 55eac60f37..80aa77bacf 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -35,83 +35,41 @@
#include "src/compilation-cache.h"
#include "src/debug.h"
#include "src/heap/spaces.h"
-#include "src/natives.h"
#include "src/objects.h"
+#include "src/parser.h"
#include "src/runtime/runtime.h"
#include "src/scopeinfo.h"
-#include "src/serialize.h"
-#include "src/snapshot.h"
+#include "src/snapshot/natives.h"
+#include "src/snapshot/serialize.h"
+#include "src/snapshot/snapshot.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
-template <class T>
-static Address AddressOf(T id) {
- return ExternalReference(id, CcTest::i_isolate()).address();
+bool DefaultSnapshotAvailable() {
+ return i::Snapshot::DefaultSnapshotBlob() != NULL;
}
-template <class T>
-static uint32_t Encode(const ExternalReferenceEncoder& encoder, T id) {
- return encoder.Encode(AddressOf(id));
+void DisableTurbofan() {
+ const char* flag = "--turbo-filter=\"\"";
+ FlagList::SetFlagsFromString(flag, StrLength(flag));
}
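Every serialization test below opens with the same two-line guard; a minimal sketch of the pattern these helpers establish:

// Guard used by each UNINITIALIZED_TEST below: filter TurboFan out and
// bail if the build already embeds a snapshot.
DisableTurbofan();
if (DefaultSnapshotAvailable()) return;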
-static uint32_t make_code(TypeCode type, int id) {
- return static_cast<uint32_t>(type) << kReferenceTypeShift | id;
-}
-
-
-TEST(ExternalReferenceEncoder) {
- Isolate* isolate = CcTest::i_isolate();
- v8::V8::Initialize();
-
- ExternalReferenceEncoder encoder(isolate);
- CHECK_EQ(make_code(BUILTIN, Builtins::kArrayCode),
- Encode(encoder, Builtins::kArrayCode));
- CHECK_EQ(make_code(v8::internal::RUNTIME_FUNCTION, Runtime::kAbort),
- Encode(encoder, Runtime::kAbort));
- ExternalReference stack_limit_address =
- ExternalReference::address_of_stack_limit(isolate);
- CHECK_EQ(make_code(UNCLASSIFIED, 2),
- encoder.Encode(stack_limit_address.address()));
- ExternalReference real_stack_limit_address =
- ExternalReference::address_of_real_stack_limit(isolate);
- CHECK_EQ(make_code(UNCLASSIFIED, 3),
- encoder.Encode(real_stack_limit_address.address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 8),
- encoder.Encode(ExternalReference::debug_break(isolate).address()));
- CHECK_EQ(
- make_code(UNCLASSIFIED, 4),
- encoder.Encode(ExternalReference::new_space_start(isolate).address()));
- CHECK_EQ(
- make_code(UNCLASSIFIED, 1),
- encoder.Encode(ExternalReference::roots_array_start(isolate).address()));
- CHECK_EQ(make_code(UNCLASSIFIED, 33),
- encoder.Encode(ExternalReference::cpu_features().address()));
-}
-
-
-TEST(ExternalReferenceDecoder) {
- Isolate* isolate = CcTest::i_isolate();
- v8::V8::Initialize();
-
- ExternalReferenceDecoder decoder(isolate);
- CHECK_EQ(AddressOf(Builtins::kArrayCode),
- decoder.Decode(make_code(BUILTIN, Builtins::kArrayCode)));
- CHECK_EQ(AddressOf(Runtime::kAbort),
- decoder.Decode(make_code(v8::internal::RUNTIME_FUNCTION,
- Runtime::kAbort)));
- CHECK_EQ(ExternalReference::address_of_stack_limit(isolate).address(),
- decoder.Decode(make_code(UNCLASSIFIED, 2)));
- CHECK_EQ(ExternalReference::address_of_real_stack_limit(isolate).address(),
- decoder.Decode(make_code(UNCLASSIFIED, 3)));
- CHECK_EQ(ExternalReference::debug_break(isolate).address(),
- decoder.Decode(make_code(UNCLASSIFIED, 8)));
- CHECK_EQ(ExternalReference::new_space_start(isolate).address(),
- decoder.Decode(make_code(UNCLASSIFIED, 4)));
-}
+// TestIsolate is used for testing isolate serialization.
+class TestIsolate : public Isolate {
+ public:
+ static v8::Isolate* NewInitialized(bool enable_serializer) {
+ i::Isolate* isolate = new TestIsolate(enable_serializer);
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8::Isolate::Scope isolate_scope(v8_isolate);
+ isolate->Init(NULL);
+ return v8_isolate;
+ }
+ explicit TestIsolate(bool enable_serializer) : Isolate(enable_serializer) {}
+};
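A minimal usage sketch for the helper, mirroring the tests that follow (Serialize() is defined earlier in this file):

v8::Isolate* isolate = TestIsolate::NewInitialized(true /* enable_serializer */);
Serialize(isolate);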
void WritePayload(const Vector<const byte>& payload, const char* file_name) {
@@ -175,24 +133,20 @@ Vector<const uint8_t> ConstructSource(Vector<const uint8_t> head,
// Test that the whole heap can be serialized.
UNINITIALIZED_TEST(Serialize) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate::CreateParams params;
- params.enable_serializer = true;
- v8::Isolate* isolate = v8::Isolate::New(params);
- Serialize(isolate);
- }
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* isolate = TestIsolate::NewInitialized(true);
+ Serialize(isolate);
}
// Test that heap serialization is non-destructive.
UNINITIALIZED_TEST(SerializeTwice) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate::CreateParams params;
- params.enable_serializer = true;
- v8::Isolate* isolate = v8::Isolate::New(params);
- Serialize(isolate);
- Serialize(isolate);
- }
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* isolate = TestIsolate::NewInitialized(true);
+ Serialize(isolate);
+ Serialize(isolate);
}
@@ -207,7 +161,7 @@ v8::Isolate* InitializeFromFile(const char* snapshot_file) {
{
SnapshotData snapshot_data(Vector<const byte>(str, len));
Deserializer deserializer(&snapshot_data);
- Isolate* isolate = Isolate::NewForTesting();
+ Isolate* isolate = new TestIsolate(false);
v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Isolate::Scope isolate_scope(v8_isolate);
isolate->Init(&deserializer);
@@ -241,466 +195,538 @@ UNINITIALIZED_DEPENDENT_TEST(Deserialize, Serialize) {
// The serialize-deserialize tests only work if the VM is built without
// serialization. That doesn't matter. We don't need to be able to
// serialize a snapshot in a VM that is booted from a snapshot.
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = Deserialize();
- {
- v8::HandleScope handle_scope(isolate);
- v8::Isolate::Scope isolate_scope(isolate);
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* isolate = Deserialize();
+ {
+ v8::HandleScope handle_scope(isolate);
+ v8::Isolate::Scope isolate_scope(isolate);
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
+ env->Enter();
- SanityCheck(isolate);
- }
- isolate->Dispose();
+ SanityCheck(isolate);
}
+ isolate->Dispose();
}
UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerialization,
SerializeTwice) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = Deserialize();
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* isolate = Deserialize();
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
+ env->Enter();
- SanityCheck(isolate);
- }
- isolate->Dispose();
+ SanityCheck(isolate);
}
+ isolate->Dispose();
}
UNINITIALIZED_DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = Deserialize();
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* isolate = Deserialize();
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
+ env->Enter();
- const char* c_source = "\"1234\".length";
- v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
- v8::Local<v8::Script> script = v8::Script::Compile(source);
- CHECK_EQ(4, script->Run()->Int32Value());
- }
- isolate->Dispose();
+ const char* c_source = "\"1234\".length";
+ v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
+ v8::Local<v8::Script> script = v8::Script::Compile(source);
+ CHECK_EQ(4, script->Run()->Int32Value());
}
+ isolate->Dispose();
}
UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
SerializeTwice) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate* isolate = Deserialize();
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* isolate = Deserialize();
+ {
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> env = v8::Context::New(isolate);
- env->Enter();
+ v8::Local<v8::Context> env = v8::Context::New(isolate);
+ env->Enter();
- const char* c_source = "\"1234\".length";
- v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
- v8::Local<v8::Script> script = v8::Script::Compile(source);
- CHECK_EQ(4, script->Run()->Int32Value());
- }
- isolate->Dispose();
+ const char* c_source = "\"1234\".length";
+ v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
+ v8::Local<v8::Script> script = v8::Script::Compile(source);
+ CHECK_EQ(4, script->Run()->Int32Value());
}
+ isolate->Dispose();
}
UNINITIALIZED_TEST(PartialSerialization) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate::CreateParams params;
- params.enable_serializer = true;
- v8::Isolate* v8_isolate = v8::Isolate::New(params);
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- v8_isolate->Enter();
- {
- Heap* heap = isolate->heap();
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* v8_isolate = TestIsolate::NewInitialized(true);
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ v8_isolate->Enter();
+ {
+ Heap* heap = isolate->heap();
- v8::Persistent<v8::Context> env;
- {
- HandleScope scope(isolate);
- env.Reset(v8_isolate, v8::Context::New(v8_isolate));
- }
- DCHECK(!env.IsEmpty());
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
- }
- // Make sure all builtin scripts are cached.
- {
- HandleScope scope(isolate);
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- isolate->bootstrapper()->NativesSourceLookup(i);
- }
- }
- heap->CollectAllGarbage(Heap::kNoGCFlags);
- heap->CollectAllGarbage(Heap::kNoGCFlags);
-
- Object* raw_foo;
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::String> foo = v8::String::NewFromUtf8(v8_isolate, "foo");
- DCHECK(!foo.IsEmpty());
- raw_foo = *(v8::Utils::OpenHandle(*foo));
+ v8::Persistent<v8::Context> env;
+ {
+ HandleScope scope(isolate);
+ env.Reset(v8_isolate, v8::Context::New(v8_isolate));
+ }
+ DCHECK(!env.IsEmpty());
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+ }
+ // Make sure all builtin scripts are cached.
+ {
+ HandleScope scope(isolate);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ isolate->bootstrapper()->NativesSourceLookup(i);
}
+ }
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ Object* raw_foo;
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::String> foo = v8::String::NewFromUtf8(v8_isolate, "foo");
+ DCHECK(!foo.IsEmpty());
+ raw_foo = *(v8::Utils::OpenHandle(*foo));
+ }
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
- }
- env.Reset();
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate, &startup_sink);
- startup_serializer.SerializeStrongReferences();
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ }
+ env.Reset();
- SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer,
- &partial_sink);
- partial_serializer.Serialize(&raw_foo);
+ SnapshotByteSink startup_sink;
+ StartupSerializer startup_serializer(isolate, &startup_sink);
+ startup_serializer.SerializeStrongReferences();
- startup_serializer.SerializeWeakReferences();
+ SnapshotByteSink partial_sink;
+ PartialSerializer partial_serializer(isolate, &startup_serializer,
+ &partial_sink);
+ partial_serializer.Serialize(&raw_foo);
- SnapshotData startup_snapshot(startup_serializer);
- SnapshotData partial_snapshot(partial_serializer);
+ startup_serializer.SerializeWeakReferences();
- WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
- WritePayload(startup_snapshot.RawData(), startup_name.start());
+ SnapshotData startup_snapshot(startup_serializer);
+ SnapshotData partial_snapshot(partial_serializer);
- startup_name.Dispose();
- }
- v8_isolate->Exit();
- v8_isolate->Dispose();
+ WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
+ WritePayload(startup_snapshot.RawData(), startup_name.start());
+
+ startup_name.Dispose();
}
+ v8_isolate->Exit();
+ v8_isolate->Dispose();
}
UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+ v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ CHECK(v8_isolate);
+ startup_name.Dispose();
+ {
+ v8::Isolate::Scope isolate_scope(v8_isolate);
- v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
- CHECK(v8_isolate);
- startup_name.Dispose();
+ const char* file_name = FLAG_testing_serialization_file;
+
+ int snapshot_size = 0;
+ byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ HandleScope handle_scope(isolate);
+ Handle<Object> root;
+ Handle<FixedArray> outdated_contexts;
+ // Intentionally empty handle. The deserializer should not come across
+ // any references to the global proxy in this test.
+ Handle<JSGlobalProxy> global_proxy = Handle<JSGlobalProxy>::null();
{
- v8::Isolate::Scope isolate_scope(v8_isolate);
-
- const char* file_name = FLAG_testing_serialization_file;
-
- int snapshot_size = 0;
- byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- HandleScope handle_scope(isolate);
- Handle<Object> root;
- Handle<FixedArray> outdated_contexts;
- // Intentionally empty handle. The deserializer should not come across
- // any references to the global proxy in this test.
- Handle<JSGlobalProxy> global_proxy = Handle<JSGlobalProxy>::null();
- {
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
- Deserializer deserializer(&snapshot_data);
- root = deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts)
- .ToHandleChecked();
- CHECK_EQ(0, outdated_contexts->length());
- CHECK(root->IsString());
- }
+ SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ Deserializer deserializer(&snapshot_data);
+ root =
+ deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts).ToHandleChecked();
+ CHECK_EQ(0, outdated_contexts->length());
+ CHECK(root->IsString());
+ }
- Handle<Object> root2;
- {
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
- Deserializer deserializer(&snapshot_data);
- root2 = deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts)
- .ToHandleChecked();
- CHECK(root2->IsString());
- CHECK(root.is_identical_to(root2));
- }
+ Handle<Object> root2;
+ {
+ SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ Deserializer deserializer(&snapshot_data);
+ root2 =
+ deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts).ToHandleChecked();
+ CHECK(root2->IsString());
+ CHECK(root.is_identical_to(root2));
}
- v8_isolate->Dispose();
+
+ DeleteArray(snapshot);
}
+ v8_isolate->Dispose();
}
UNINITIALIZED_TEST(ContextSerialization) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate::CreateParams params;
- params.enable_serializer = true;
- v8::Isolate* v8_isolate = v8::Isolate::New(params);
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- Heap* heap = isolate->heap();
- {
- v8::Isolate::Scope isolate_scope(v8_isolate);
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* v8_isolate = TestIsolate::NewInitialized(true);
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ Heap* heap = isolate->heap();
+ {
+ v8::Isolate::Scope isolate_scope(v8_isolate);
- v8::Persistent<v8::Context> env;
- {
- HandleScope scope(isolate);
- env.Reset(v8_isolate, v8::Context::New(v8_isolate));
- }
- DCHECK(!env.IsEmpty());
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
- }
- // Make sure all builtin scripts are cached.
- {
- HandleScope scope(isolate);
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- isolate->bootstrapper()->NativesSourceLookup(i);
- }
+ v8::Persistent<v8::Context> env;
+ {
+ HandleScope scope(isolate);
+ env.Reset(v8_isolate, v8::Context::New(v8_isolate));
+ }
+ DCHECK(!env.IsEmpty());
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+ }
+ // Make sure all builtin scripts are cached.
+ {
+ HandleScope scope(isolate);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ isolate->bootstrapper()->NativesSourceLookup(i);
}
- // If we don't do this then we end up with a stray root pointing at the
- // context even after we have disposed of env.
- heap->CollectAllGarbage(Heap::kNoGCFlags);
+ }
+ // If we don't do this then we end up with a stray root pointing at the
+ // context even after we have disposed of env.
+ heap->CollectAllGarbage(Heap::kNoGCFlags);
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
- }
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ }
- i::Object* raw_context = *v8::Utils::OpenPersistent(env);
+ i::Object* raw_context = *v8::Utils::OpenPersistent(env);
- env.Reset();
+ env.Reset();
- SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate, &startup_sink);
- startup_serializer.SerializeStrongReferences();
+ SnapshotByteSink startup_sink;
+ StartupSerializer startup_serializer(isolate, &startup_sink);
+ startup_serializer.SerializeStrongReferences();
- SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer,
- &partial_sink);
- partial_serializer.Serialize(&raw_context);
- startup_serializer.SerializeWeakReferences();
+ SnapshotByteSink partial_sink;
+ PartialSerializer partial_serializer(isolate, &startup_serializer,
+ &partial_sink);
+ partial_serializer.Serialize(&raw_context);
+ startup_serializer.SerializeWeakReferences();
- SnapshotData startup_snapshot(startup_serializer);
- SnapshotData partial_snapshot(partial_serializer);
+ SnapshotData startup_snapshot(startup_serializer);
+ SnapshotData partial_snapshot(partial_serializer);
- WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
- WritePayload(startup_snapshot.RawData(), startup_name.start());
+ WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
+ WritePayload(startup_snapshot.RawData(), startup_name.start());
- startup_name.Dispose();
- }
- v8_isolate->Dispose();
+ startup_name.Dispose();
}
+ v8_isolate->Dispose();
}
UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+ v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ CHECK(v8_isolate);
+ startup_name.Dispose();
+ {
+ v8::Isolate::Scope isolate_scope(v8_isolate);
- v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
- CHECK(v8_isolate);
- startup_name.Dispose();
+ const char* file_name = FLAG_testing_serialization_file;
+
+ int snapshot_size = 0;
+ byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ HandleScope handle_scope(isolate);
+ Handle<Object> root;
+ Handle<FixedArray> outdated_contexts;
+ Handle<JSGlobalProxy> global_proxy =
+ isolate->factory()->NewUninitializedJSGlobalProxy();
{
- v8::Isolate::Scope isolate_scope(v8_isolate);
-
- const char* file_name = FLAG_testing_serialization_file;
-
- int snapshot_size = 0;
- byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- HandleScope handle_scope(isolate);
- Handle<Object> root;
- Handle<FixedArray> outdated_contexts;
- Handle<JSGlobalProxy> global_proxy =
- isolate->factory()->NewUninitializedJSGlobalProxy();
- {
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
- Deserializer deserializer(&snapshot_data);
- root = deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts)
- .ToHandleChecked();
- CHECK(root->IsContext());
- CHECK(Handle<Context>::cast(root)->global_proxy() == *global_proxy);
- CHECK_EQ(1, outdated_contexts->length());
- }
+ SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ Deserializer deserializer(&snapshot_data);
+ root =
+ deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts).ToHandleChecked();
+ CHECK(root->IsContext());
+ CHECK(Handle<Context>::cast(root)->global_proxy() == *global_proxy);
+ CHECK_EQ(1, outdated_contexts->length());
+ }
- Handle<Object> root2;
- {
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
- Deserializer deserializer(&snapshot_data);
- root2 = deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts)
- .ToHandleChecked();
- CHECK(root2->IsContext());
- CHECK(!root.is_identical_to(root2));
- }
+ Handle<Object> root2;
+ {
+ SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ Deserializer deserializer(&snapshot_data);
+ root2 =
+ deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts).ToHandleChecked();
+ CHECK(root2->IsContext());
+ CHECK(!root.is_identical_to(root2));
}
- v8_isolate->Dispose();
+ DeleteArray(snapshot);
}
+ v8_isolate->Dispose();
}
UNINITIALIZED_TEST(CustomContextSerialization) {
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- v8::Isolate::CreateParams params;
- params.enable_serializer = true;
- v8::Isolate* v8_isolate = v8::Isolate::New(params);
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- {
- v8::Isolate::Scope isolate_scope(v8_isolate);
+ DisableTurbofan();
+ if (DefaultSnapshotAvailable()) return;
+ v8::Isolate* v8_isolate = TestIsolate::NewInitialized(true);
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ {
+ v8::Isolate::Scope isolate_scope(v8_isolate);
- v8::Persistent<v8::Context> env;
- {
- HandleScope scope(isolate);
- env.Reset(v8_isolate, v8::Context::New(v8_isolate));
- }
- DCHECK(!env.IsEmpty());
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
- // After execution, e's function context refers to the global object.
- CompileRun(
- "var e;"
- "(function() {"
- " e = function(s) { return eval (s); }"
- "})();"
- "var o = this;"
- "var r = Math.random() + Math.cos(0);"
- "var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
- "var s = parseInt('12345');");
-
- Vector<const uint8_t> source = ConstructSource(
- STATIC_CHAR_VECTOR("function g() { return [,"),
- STATIC_CHAR_VECTOR("1,"),
- STATIC_CHAR_VECTOR("];} a = g(); b = g(); b.push(1);"), 100000);
- v8::Handle<v8::String> source_str = v8::String::NewFromOneByte(
- v8_isolate, source.start(), v8::String::kNormalString,
- source.length());
- CompileRun(source_str);
- source.Dispose();
- }
- // Make sure all builtin scripts are cached.
- {
- HandleScope scope(isolate);
- for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
- isolate->bootstrapper()->NativesSourceLookup(i);
- }
+ v8::Persistent<v8::Context> env;
+ {
+ HandleScope scope(isolate);
+ env.Reset(v8_isolate, v8::Context::New(v8_isolate));
+ }
+ DCHECK(!env.IsEmpty());
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+ // After execution, e's function context refers to the global object.
+ CompileRun(
+ "var e;"
+ "(function() {"
+ " e = function(s) { return eval (s); }"
+ "})();"
+ "var o = this;"
+ "var r = Math.random() + Math.cos(0);"
+ "var f = (function(a, b) { return a + b; }).bind(1, 2, 3);"
+ "var s = parseInt('12345');");
+
+ Vector<const uint8_t> source = ConstructSource(
+ STATIC_CHAR_VECTOR("function g() { return [,"),
+ STATIC_CHAR_VECTOR("1,"),
+ STATIC_CHAR_VECTOR("];} a = g(); b = g(); b.push(1);"), 100000);
+ v8::Handle<v8::String> source_str = v8::String::NewFromOneByte(
+ v8_isolate, source.start(), v8::String::kNormalString,
+ source.length());
+ CompileRun(source_str);
+ source.Dispose();
+ }
+ // Make sure all builtin scripts are cached.
+ {
+ HandleScope scope(isolate);
+ for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+ isolate->bootstrapper()->NativesSourceLookup(i);
}
- // If we don't do this then we end up with a stray root pointing at the
- // context even after we have disposed of env.
- isolate->heap()->CollectAllAvailableGarbage("snapshotting");
+ }
+ // If we don't do this then we end up with a stray root pointing at the
+ // context even after we have disposed of env.
+ isolate->heap()->CollectAllAvailableGarbage("snapshotting");
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
- {
- v8::HandleScope handle_scope(v8_isolate);
- v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
- }
+ {
+ v8::HandleScope handle_scope(v8_isolate);
+ v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+ }
- i::Object* raw_context = *v8::Utils::OpenPersistent(env);
+ i::Object* raw_context = *v8::Utils::OpenPersistent(env);
- env.Reset();
+ env.Reset();
- SnapshotByteSink startup_sink;
- StartupSerializer startup_serializer(isolate, &startup_sink);
- startup_serializer.SerializeStrongReferences();
+ SnapshotByteSink startup_sink;
+ StartupSerializer startup_serializer(isolate, &startup_sink);
+ startup_serializer.SerializeStrongReferences();
- SnapshotByteSink partial_sink;
- PartialSerializer partial_serializer(isolate, &startup_serializer,
- &partial_sink);
- partial_serializer.Serialize(&raw_context);
- startup_serializer.SerializeWeakReferences();
+ SnapshotByteSink partial_sink;
+ PartialSerializer partial_serializer(isolate, &startup_serializer,
+ &partial_sink);
+ partial_serializer.Serialize(&raw_context);
+ startup_serializer.SerializeWeakReferences();
- SnapshotData startup_snapshot(startup_serializer);
- SnapshotData partial_snapshot(partial_serializer);
+ SnapshotData startup_snapshot(startup_serializer);
+ SnapshotData partial_snapshot(partial_serializer);
- WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
- WritePayload(startup_snapshot.RawData(), startup_name.start());
+ WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
+ WritePayload(startup_snapshot.RawData(), startup_name.start());
- startup_name.Dispose();
- }
- v8_isolate->Dispose();
+ startup_name.Dispose();
}
+ v8_isolate->Dispose();
}
UNINITIALIZED_DEPENDENT_TEST(CustomContextDeserialization,
CustomContextSerialization) {
+ DisableTurbofan();
FLAG_crankshaft = false;
- if (!Snapshot::HaveASnapshotToStartFrom()) {
- int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
- Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
- SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+ if (DefaultSnapshotAvailable()) return;
+ int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+ Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+ SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+ v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+ CHECK(v8_isolate);
+ startup_name.Dispose();
+ {
+ v8::Isolate::Scope isolate_scope(v8_isolate);
- v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
- CHECK(v8_isolate);
- startup_name.Dispose();
+ const char* file_name = FLAG_testing_serialization_file;
+
+ int snapshot_size = 0;
+ byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+ Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+ HandleScope handle_scope(isolate);
+ Handle<Object> root;
+ Handle<FixedArray> outdated_contexts;
+ Handle<JSGlobalProxy> global_proxy =
+ isolate->factory()->NewUninitializedJSGlobalProxy();
{
- v8::Isolate::Scope isolate_scope(v8_isolate);
-
- const char* file_name = FLAG_testing_serialization_file;
-
- int snapshot_size = 0;
- byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
- Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
- HandleScope handle_scope(isolate);
- Handle<Object> root;
- Handle<FixedArray> outdated_contexts;
- Handle<JSGlobalProxy> global_proxy =
- isolate->factory()->NewUninitializedJSGlobalProxy();
- {
- SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
- Deserializer deserializer(&snapshot_data);
- root = deserializer.DeserializePartial(isolate, global_proxy,
- &outdated_contexts)
- .ToHandleChecked();
- CHECK_EQ(2, outdated_contexts->length());
- CHECK(root->IsContext());
- Handle<Context> context = Handle<Context>::cast(root);
- CHECK(context->global_proxy() == *global_proxy);
- Handle<String> o = isolate->factory()->NewStringFromAsciiChecked("o");
- Handle<JSObject> global_object(context->global_object(), isolate);
- Handle<Object> property = JSObject::GetDataProperty(global_object, o);
- CHECK(property.is_identical_to(global_proxy));
-
- v8::Handle<v8::Context> v8_context = v8::Utils::ToLocal(context);
- v8::Context::Scope context_scope(v8_context);
- double r = CompileRun("r")->ToNumber(v8_isolate)->Value();
- CHECK(r >= 1 && r <= 2);
- int f = CompileRun("f()")->ToNumber(v8_isolate)->Int32Value();
- CHECK_EQ(5, f);
- f = CompileRun("e('f()')")->ToNumber(v8_isolate)->Int32Value();
- CHECK_EQ(5, f);
- v8::Handle<v8::String> s = CompileRun("s")->ToString(v8_isolate);
- CHECK(s->Equals(v8_str("12345")));
- int a = CompileRun("a.length")->ToNumber(v8_isolate)->Int32Value();
- CHECK_EQ(100001, a);
- int b = CompileRun("b.length")->ToNumber(v8_isolate)->Int32Value();
- CHECK_EQ(100002, b);
- }
+ SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+ Deserializer deserializer(&snapshot_data);
+ root =
+ deserializer.DeserializePartial(isolate, global_proxy,
+ &outdated_contexts).ToHandleChecked();
+ CHECK_EQ(2, outdated_contexts->length());
+ CHECK(root->IsContext());
+ Handle<Context> context = Handle<Context>::cast(root);
+ CHECK(context->global_proxy() == *global_proxy);
+ Handle<String> o = isolate->factory()->NewStringFromAsciiChecked("o");
+ Handle<JSObject> global_object(context->global_object(), isolate);
+ Handle<Object> property = JSObject::GetDataProperty(global_object, o);
+ CHECK(property.is_identical_to(global_proxy));
+
+ v8::Handle<v8::Context> v8_context = v8::Utils::ToLocal(context);
+ v8::Context::Scope context_scope(v8_context);
+ double r = CompileRun("r")->ToNumber(v8_isolate)->Value();
+ CHECK(r >= 1 && r <= 2);
+ int f = CompileRun("f()")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(5, f);
+ f = CompileRun("e('f()')")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(5, f);
+ v8::Handle<v8::String> s = CompileRun("s")->ToString(v8_isolate);
+ CHECK(s->Equals(v8_str("12345")));
+ int a = CompileRun("a.length")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(100001, a);
+ int b = CompileRun("b.length")->ToNumber(v8_isolate)->Int32Value();
+ CHECK_EQ(100002, b);
}
- v8_isolate->Dispose();
+ DeleteArray(snapshot);
+ }
+ v8_isolate->Dispose();
+}
+
+
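+// Each isolate should only see the script baked into its own snapshot blob:
+// isolate1 gets f() from source1, isolate2 the overriding f() and g() from
+// source2.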
+TEST(PerIsolateSnapshotBlobs) {
+ DisableTurbofan();
+ const char* source1 = "function f() { return 42; }";
+ const char* source2 =
+ "function f() { return g() * 2; }"
+ "function g() { return 43; }"
+ "/./.test('a')";
+
+ v8::StartupData data1 = v8::V8::CreateSnapshotDataBlob(source1);
+ v8::StartupData data2 = v8::V8::CreateSnapshotDataBlob(source2);
+
+ v8::Isolate::CreateParams params1;
+ params1.snapshot_blob = &data1;
+ v8::Isolate* isolate1 = v8::Isolate::New(params1);
+ {
+ v8::Isolate::Scope i_scope(isolate1);
+ v8::HandleScope h_scope(isolate1);
+ v8::Local<v8::Context> context = v8::Context::New(isolate1);
+ delete[] data1.data; // We can dispose of the snapshot blob now.
+ v8::Context::Scope c_scope(context);
+ CHECK_EQ(42, CompileRun("f()")->ToInt32(isolate1)->Int32Value());
+ CHECK(CompileRun("this.g")->IsUndefined());
+ }
+ isolate1->Dispose();
+
+ v8::Isolate::CreateParams params2;
+ params2.snapshot_blob = &data2;
+ v8::Isolate* isolate2 = v8::Isolate::New(params2);
+ {
+ v8::Isolate::Scope i_scope(isolate2);
+ v8::HandleScope h_scope(isolate2);
+ v8::Local<v8::Context> context = v8::Context::New(isolate2);
+ delete[] data2.data; // We can dispose of the snapshot blob now.
+ v8::Context::Scope c_scope(context);
+ CHECK_EQ(86, CompileRun("f()")->ToInt32(isolate2)->Int32Value());
+ CHECK_EQ(43, CompileRun("g()")->ToInt32(isolate2)->Int32Value());
+ }
+ isolate2->Dispose();
+}
+
+
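+// Like the test above, but entering the isolates under v8::Locker: first a
+// default isolate as a sanity check, then one created from a snapshot blob.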
+TEST(PerIsolateSnapshotBlobsWithLocker) {
+ DisableTurbofan();
+ v8::Isolate* isolate0 = v8::Isolate::New();
+ {
+ v8::Locker locker(isolate0);
+ v8::Isolate::Scope i_scope(isolate0);
+ v8::HandleScope h_scope(isolate0);
+ v8::Local<v8::Context> context = v8::Context::New(isolate0);
+ v8::Context::Scope c_scope(context);
+ CHECK_EQ(1, CompileRun("Math.cos(0)")->ToInt32(isolate0)->Int32Value());
}
+ isolate0->Dispose();
+
+ const char* source1 = "function f() { return 42; }";
+
+ v8::StartupData data1 = v8::V8::CreateSnapshotDataBlob(source1);
+
+ v8::Isolate::CreateParams params1;
+ params1.snapshot_blob = &data1;
+ v8::Isolate* isolate1 = v8::Isolate::New(params1);
+ {
+ v8::Locker locker(isolate1);
+ v8::Isolate::Scope i_scope(isolate1);
+ v8::HandleScope h_scope(isolate1);
+ v8::Local<v8::Context> context = v8::Context::New(isolate1);
+ delete[] data1.data; // We can dispose of the snapshot blob now.
+ v8::Context::Scope c_scope(context);
+ CHECK_EQ(42, CompileRun("f()")->ToInt32(isolate1)->Int32Value());
+ }
+ isolate1->Dispose();
}
@@ -735,10 +761,10 @@ int CountBuiltins() {
static Handle<SharedFunctionInfo> CompileScript(
Isolate* isolate, Handle<String> source, Handle<String> name,
ScriptData** cached_data, v8::ScriptCompiler::CompileOptions options) {
- return Compiler::CompileScript(source, name, 0, 0, false, false,
- Handle<Context>(isolate->native_context()),
- NULL, cached_data, options, NOT_NATIVES_CODE,
- false);
+ return Compiler::CompileScript(
+ source, name, 0, 0, false, false, Handle<Object>(),
+ Handle<Context>(isolate->native_context()), NULL, cached_data, options,
+ NOT_NATIVES_CODE, false);
}
@@ -1346,7 +1372,9 @@ TEST(SerializeToplevelFlagChange) {
v8::ScriptCompiler::CachedData* cache = ProduceCache(source);
v8::Isolate* isolate2 = v8::Isolate::New();
+
FLAG_allow_natives_syntax = true; // Flag change should trigger cache reject.
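+  // Propagate implications of the changed flag.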
+ FlagList::EnforceFlagImplications();
{
v8::Isolate::Scope iscope(isolate2);
v8::HandleScope scope(isolate2);
@@ -1393,7 +1421,6 @@ TEST(SerializeToplevelBitFlip) {
TEST(SerializeWithHarmonyScoping) {
FLAG_serialize_toplevel = true;
- FLAG_harmony_scoping = true;
const char* source1 = "'use strict'; let x = 'X'";
const char* source2 = "'use strict'; let y = 'Y'";
@@ -1454,3 +1481,71 @@ TEST(SerializeWithHarmonyScoping) {
}
isolate2->Dispose();
}
+
+
+TEST(SerializeInternalReference) {
+#ifdef V8_TARGET_ARCH_ARM64
+ return;
+#endif // V8_TARGET_ARCH_ARM64
+ // Disable experimental natives that are loaded after deserialization.
+ FLAG_turbo_deoptimization = false;
+ FLAG_context_specialization = false;
+ FLAG_always_opt = true;
+ const char* flag = "--turbo-filter=foo";
+ FlagList::SetFlagsFromString(flag, StrLength(flag));
+
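+  // A dense switch like the one below is expected to compile to a jump
+  // table, i.e. to code containing internal references that the serializer
+  // must encode and the deserializer must fix up.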
+ const char* source =
+ "var foo = (function(stdlib, foreign, heap) {"
+ " function foo(i) {"
+ " i = i|0;"
+ " var j = 0;"
+ " switch (i) {"
+ " case 0:"
+ " case 1: j = 1; break;"
+ " case 2:"
+ " case 3: j = 2; break;"
+ " case 4:"
+ " case 5: j = foo(3) + 1; break;"
+ " default: j = 0; break;"
+ " }"
+ " return j + 10;"
+ " }"
+ " return { foo: foo };"
+ "})(this, {}, undefined).foo;"
+ "foo(1);";
+
+ v8::StartupData data = v8::V8::CreateSnapshotDataBlob(source);
+ CHECK(data.data);
+
+ v8::Isolate::CreateParams params;
+ params.snapshot_blob = &data;
+ v8::Isolate* isolate = v8::Isolate::New(params);
+ {
+ v8::Isolate::Scope i_scope(isolate);
+ v8::HandleScope h_scope(isolate);
+ v8::Local<v8::Context> context = v8::Context::New(isolate);
+ delete[] data.data; // We can dispose of the snapshot blob now.
+ v8::Context::Scope c_scope(context);
+ v8::Handle<v8::Function> foo =
+ v8::Handle<v8::Function>::Cast(CompileRun("foo"));
+
+ // There are at least 6 internal references.
+ int mask = RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+ RelocIterator it(v8::Utils::OpenHandle(*foo)->code(), mask);
+ for (int i = 0; i < 6; ++i) {
+ CHECK(!it.done());
+ it.next();
+ }
+
+ CHECK(v8::Utils::OpenHandle(*foo)->code()->is_turbofanned());
+ CHECK_EQ(11, CompileRun("foo(0)")->ToInt32(isolate)->Int32Value());
+ CHECK_EQ(11, CompileRun("foo(1)")->ToInt32(isolate)->Int32Value());
+ CHECK_EQ(12, CompileRun("foo(2)")->ToInt32(isolate)->Int32Value());
+ CHECK_EQ(12, CompileRun("foo(3)")->ToInt32(isolate)->Int32Value());
+ CHECK_EQ(23, CompileRun("foo(4)")->ToInt32(isolate)->Int32Value());
+ CHECK_EQ(23, CompileRun("foo(5)")->ToInt32(isolate)->Int32Value());
+ CHECK_EQ(10, CompileRun("foo(6)")->ToInt32(isolate)->Int32Value());
+ }
+ isolate->Dispose();
+}
diff --git a/deps/v8/test/cctest/test-spaces.cc b/deps/v8/test/cctest/test-spaces.cc
index 331ea02510..92305f9f1e 100644
--- a/deps/v8/test/cctest/test-spaces.cc
+++ b/deps/v8/test/cctest/test-spaces.cc
@@ -28,7 +28,7 @@
#include <stdlib.h>
#include "src/base/platform/platform.h"
-#include "src/snapshot.h"
+#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
@@ -207,12 +207,13 @@ static void VerifyMemoryChunk(Isolate* isolate,
TEST(Regress3540) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
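+  // Express the code-range sizes in page-size multiples rather than MB, so
+  // that the test stays valid for non-default page sizes.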
+ const int pageSize = Page::kPageSize;
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(
memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
CodeRange* code_range = new CodeRange(isolate);
- const size_t code_range_size = 4 * MB;
+ const size_t code_range_size = 4 * pageSize;
if (!code_range->SetUp(
code_range_size +
RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
@@ -222,13 +223,13 @@ TEST(Regress3540) {
}
Address address;
size_t size;
- address = code_range->AllocateRawMemory(code_range_size - 2 * MB,
- code_range_size - 2 * MB, &size);
+ address = code_range->AllocateRawMemory(
+ code_range_size - 2 * pageSize, code_range_size - 2 * pageSize, &size);
CHECK(address != NULL);
Address null_address;
size_t null_size;
null_address = code_range->AllocateRawMemory(
- code_range_size - MB, code_range_size - MB, &null_size);
+ code_range_size - pageSize, code_range_size - pageSize, &null_size);
CHECK(null_address == NULL);
code_range->FreeRawMemory(address, size);
delete code_range;
@@ -307,9 +308,7 @@ TEST(MemoryAllocator) {
heap->MaxExecutableSize()));
int total_pages = 0;
- OldSpace faked_space(heap,
- heap->MaxReserved(),
- OLD_POINTER_SPACE,
+ OldSpace faked_space(heap, heap->MaxReserved(), OLD_POINTER_SPACE,
NOT_EXECUTABLE);
Page* first_page = memory_allocator->AllocatePage(
faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);
@@ -379,10 +378,8 @@ TEST(OldSpace) {
heap->MaxExecutableSize()));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
- OldSpace* s = new OldSpace(heap,
- heap->MaxOldGenerationSize(),
- OLD_POINTER_SPACE,
- NOT_EXECUTABLE);
+ OldSpace* s = new OldSpace(heap, heap->MaxOldGenerationSize(),
+ OLD_POINTER_SPACE, NOT_EXECUTABLE);
CHECK(s != NULL);
CHECK(s->SetUp());
@@ -422,7 +419,9 @@ TEST(LargeObjectSpace) {
{ AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
if (allocation.IsRetry()) break;
}
- CHECK(lo->Available() < available);
+  // The available value is conservative: it may already report zero before
+  // the heap is actually exhausted.
+ CHECK(lo->Available() < available || available == 0);
}
CHECK(!lo->IsEmpty());
@@ -434,9 +433,10 @@ TEST(LargeObjectSpace) {
TEST(SizeOfFirstPageIsLargeEnough) {
if (i::FLAG_always_opt) return;
// Bootstrapping without a snapshot causes more allocations.
- if (!i::Snapshot::HaveASnapshotToStartFrom()) return;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
+ if (!isolate->snapshot_available()) return;
+ if (Snapshot::EmbedsScript(isolate)) return;
// Freshly initialized VM gets by with one page per space.
for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
@@ -460,7 +460,7 @@ TEST(SizeOfFirstPageIsLargeEnough) {
UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
- FLAG_target_semi_space_size = 2;
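+  // The flag value is in MB; request two pages' worth, whatever the page
+  // size is.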
+ FLAG_target_semi_space_size = 2 * (Page::kPageSize / MB);
if (FLAG_optimize_for_size) return;
v8::Isolate* isolate = v8::Isolate::New();
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 9a4e96ffd5..8d5129cd05 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1209,7 +1209,7 @@ UNINITIALIZED_TEST(OneByteArrayJoin) {
v8::Isolate::CreateParams create_params;
// Set heap limits.
create_params.constraints.set_max_semi_space_size(1);
- create_params.constraints.set_max_old_space_size(5);
+ create_params.constraints.set_max_old_space_size(6);
v8::Isolate* isolate = v8::Isolate::New(create_params);
isolate->Enter();
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index d31b4131df..0d20e0f043 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -192,6 +192,7 @@ TEST(TerminateOnlyV8ThreadFromOtherThread) {
// Run a loop that will be infinite if thread termination does not work.
v8::Handle<v8::String> source = v8::String::NewFromUtf8(
CcTest::isolate(), "try { loop(); fail(); } catch(e) { fail(); }");
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
v8::Script::Compile(source)->Run();
thread.Join();
@@ -228,6 +229,7 @@ void LoopGetProperty(const v8::FunctionCallbackInfo<v8::Value>& args) {
" }"
" fail();"
" } catch(e) {"
+ " (function() {})();" // trigger stack check.
" fail();"
" }"
"}"
@@ -375,6 +377,7 @@ void MicrotaskLoopForever(const v8::FunctionCallbackInfo<v8::Value>& info) {
// Enqueue another should-not-run task to ensure we clean out the queue
// when we terminate.
isolate->EnqueueMicrotask(v8::Function::New(isolate, MicrotaskShouldNotRun));
+ i::FLAG_turbo_osr = false; // TODO(titzer): interrupts in TF loops.
CompileRun("terminate(); while (true) { }");
CHECK(v8::V8::IsExecutionTerminating());
}
@@ -474,3 +477,65 @@ TEST(ErrorObjectAfterTermination) {
// TODO(yangguo): crbug/403509. Check for empty handle instead.
CHECK(error->IsUndefined());
}
+
+
+void InnerTryCallTerminate(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ v8::Handle<v8::Object> global = CcTest::global();
+ v8::Handle<v8::Function> loop =
+ v8::Handle<v8::Function>::Cast(global->Get(v8_str("loop")));
+ i::MaybeHandle<i::Object> result =
+ i::Execution::TryCall(v8::Utils::OpenHandle((*loop)),
+ v8::Utils::OpenHandle((*global)), 0, NULL, NULL);
+ CHECK(result.is_null());
+  // TryCall ignores terminate execution, but re-requests the interrupt.
+ CHECK(!v8::V8::IsExecutionTerminating(args.GetIsolate()));
+ CHECK(CompileRun("1 + 1;").IsEmpty());
+}
+
+
+TEST(TerminationInInnerTryCall) {
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global_template = CreateGlobalTemplate(
+ CcTest::isolate(), TerminateCurrentThread, DoLoopNoCall);
+ global_template->Set(
+ v8_str("inner_try_call_terminate"),
+ v8::FunctionTemplate::New(isolate, InnerTryCallTerminate));
+ v8::Handle<v8::Context> context =
+ v8::Context::New(CcTest::isolate(), NULL, global_template);
+ v8::Context::Scope context_scope(context);
+ {
+ v8::TryCatch try_catch;
+ CompileRun("inner_try_call_terminate()");
+ CHECK(try_catch.HasTerminated());
+ }
+ CHECK_EQ(4, CompileRun("2 + 2")->ToInt32()->Int32Value());
+ CHECK(!v8::V8::IsExecutionTerminating());
+}
+
+
+TEST(TerminateAndTryCall) {
+ i::FLAG_allow_natives_syntax = true;
+ v8::Isolate* isolate = CcTest::isolate();
+ v8::HandleScope scope(isolate);
+ v8::Handle<v8::ObjectTemplate> global = CreateGlobalTemplate(
+ isolate, TerminateCurrentThread, DoLoopCancelTerminate);
+ v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
+ v8::Context::Scope context_scope(context);
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ v8::TryCatch try_catch;
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
+  // Terminate execution is triggered inside TryCall; TryCall swallows it but
+  // re-requests the interrupt so that it triggers later.
+ CHECK(CompileRun("terminate(); reference_error();").IsEmpty());
+ CHECK(try_catch.HasCaught());
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ CHECK(CcTest::global()->Get(v8_str("terminate"))->IsFunction());
+  // Once terminate has been re-requested, the first stack check fails.
+ CHECK(CompileRun("1 + 1").IsEmpty());
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
+ // V8 then recovers.
+ CHECK_EQ(4, CompileRun("2 + 2")->ToInt32()->Int32Value());
+ CHECK(!v8::V8::IsExecutionTerminating(isolate));
+}
diff --git a/deps/v8/test/cctest/test-transitions.cc b/deps/v8/test/cctest/test-transitions.cc
index 59c9f74c96..800c2a0e44 100644
--- a/deps/v8/test/cctest/test-transitions.cc
+++ b/deps/v8/test/cctest/test-transitions.cc
@@ -20,16 +20,6 @@ using namespace v8::internal;
// Helper functions.
//
-static void ConnectTransition(Handle<Map> parent,
- Handle<TransitionArray> transitions,
- Handle<Map> child) {
- if (!parent->HasTransitionArray() || *transitions != parent->transitions()) {
- parent->set_transitions(*transitions);
- }
- child->SetBackPointer(*parent);
-}
-
-
static void CheckPropertyDetailsFieldsConsistency(PropertyType type,
PropertyKind kind,
PropertyLocation location) {
@@ -69,34 +59,32 @@ TEST(TransitionArray_SimpleFieldTransitions) {
attributes, Representation::Tagged(),
OMIT_TRANSITION).ToHandleChecked();
- CHECK(!map0->HasTransitionArray());
- Handle<TransitionArray> transitions = TransitionArray::Allocate(isolate, 0);
- CHECK(transitions->IsFullTransitionArray());
-
- int transition;
- transitions =
- transitions->Insert(map0, name1, map1, SIMPLE_PROPERTY_TRANSITION);
- ConnectTransition(map0, transitions, map1);
- CHECK(transitions->IsSimpleTransition());
- transition = transitions->Search(kData, *name1, attributes);
- CHECK_EQ(TransitionArray::kSimpleTransitionIndex, transition);
- CHECK_EQ(*name1, transitions->GetKey(transition));
- CHECK_EQ(*map1, transitions->GetTarget(transition));
-
- transitions =
- transitions->Insert(map0, name2, map2, SIMPLE_PROPERTY_TRANSITION);
- ConnectTransition(map0, transitions, map2);
- CHECK(transitions->IsFullTransitionArray());
-
- transition = transitions->Search(kData, *name1, attributes);
- CHECK_EQ(*name1, transitions->GetKey(transition));
- CHECK_EQ(*map1, transitions->GetTarget(transition));
-
- transition = transitions->Search(kData, *name2, attributes);
- CHECK_EQ(*name2, transitions->GetKey(transition));
- CHECK_EQ(*map2, transitions->GetTarget(transition));
-
- DCHECK(transitions->IsSortedNoDuplicates());
+ CHECK(map0->raw_transitions()->IsSmi());
+
+ TransitionArray::Insert(map0, name1, map1, SIMPLE_PROPERTY_TRANSITION);
+ CHECK(TransitionArray::IsSimpleTransition(map0->raw_transitions()));
+ CHECK_EQ(*map1,
+ TransitionArray::SearchTransition(*map0, kData, *name1, attributes));
+ CHECK_EQ(1, TransitionArray::NumberOfTransitions(map0->raw_transitions()));
+ CHECK_EQ(*name1, TransitionArray::GetKey(map0->raw_transitions(), 0));
+ CHECK_EQ(*map1, TransitionArray::GetTarget(map0->raw_transitions(), 0));
+
+ TransitionArray::Insert(map0, name2, map2, SIMPLE_PROPERTY_TRANSITION);
+ CHECK(TransitionArray::IsFullTransitionArray(map0->raw_transitions()));
+
+ CHECK_EQ(*map1,
+ TransitionArray::SearchTransition(*map0, kData, *name1, attributes));
+ CHECK_EQ(*map2,
+ TransitionArray::SearchTransition(*map0, kData, *name2, attributes));
+ CHECK_EQ(2, TransitionArray::NumberOfTransitions(map0->raw_transitions()));
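+  // The two entries may be stored in either order, so just check that each
+  // (key, target) pair matches one of the inserted transitions.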
+ for (int i = 0; i < 2; i++) {
+ Name* key = TransitionArray::GetKey(map0->raw_transitions(), i);
+ Map* target = TransitionArray::GetTarget(map0->raw_transitions(), i);
+ CHECK((key == *name1 && target == *map1) ||
+ (key == *name2 && target == *map2));
+ }
+
+ DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
}
@@ -120,31 +108,32 @@ TEST(TransitionArray_FullFieldTransitions) {
attributes, Representation::Tagged(),
OMIT_TRANSITION).ToHandleChecked();
- CHECK(!map0->HasTransitionArray());
- Handle<TransitionArray> transitions = TransitionArray::Allocate(isolate, 0);
- CHECK(transitions->IsFullTransitionArray());
-
- int transition;
- transitions = transitions->Insert(map0, name1, map1, PROPERTY_TRANSITION);
- ConnectTransition(map0, transitions, map1);
- CHECK(transitions->IsFullTransitionArray());
- transition = transitions->Search(kData, *name1, attributes);
- CHECK_EQ(*name1, transitions->GetKey(transition));
- CHECK_EQ(*map1, transitions->GetTarget(transition));
-
- transitions = transitions->Insert(map0, name2, map2, PROPERTY_TRANSITION);
- ConnectTransition(map0, transitions, map2);
- CHECK(transitions->IsFullTransitionArray());
-
- transition = transitions->Search(kData, *name1, attributes);
- CHECK_EQ(*name1, transitions->GetKey(transition));
- CHECK_EQ(*map1, transitions->GetTarget(transition));
-
- transition = transitions->Search(kData, *name2, attributes);
- CHECK_EQ(*name2, transitions->GetKey(transition));
- CHECK_EQ(*map2, transitions->GetTarget(transition));
+ CHECK(map0->raw_transitions()->IsSmi());
+
+ TransitionArray::Insert(map0, name1, map1, PROPERTY_TRANSITION);
+ CHECK(TransitionArray::IsFullTransitionArray(map0->raw_transitions()));
+ CHECK_EQ(*map1,
+ TransitionArray::SearchTransition(*map0, kData, *name1, attributes));
+ CHECK_EQ(1, TransitionArray::NumberOfTransitions(map0->raw_transitions()));
+ CHECK_EQ(*name1, TransitionArray::GetKey(map0->raw_transitions(), 0));
+ CHECK_EQ(*map1, TransitionArray::GetTarget(map0->raw_transitions(), 0));
+
+ TransitionArray::Insert(map0, name2, map2, PROPERTY_TRANSITION);
+ CHECK(TransitionArray::IsFullTransitionArray(map0->raw_transitions()));
+
+ CHECK_EQ(*map1,
+ TransitionArray::SearchTransition(*map0, kData, *name1, attributes));
+ CHECK_EQ(*map2,
+ TransitionArray::SearchTransition(*map0, kData, *name2, attributes));
+ CHECK_EQ(2, TransitionArray::NumberOfTransitions(map0->raw_transitions()));
+ for (int i = 0; i < 2; i++) {
+ Name* key = TransitionArray::GetKey(map0->raw_transitions(), i);
+ Map* target = TransitionArray::GetTarget(map0->raw_transitions(), i);
+ CHECK((key == *name1 && target == *map1) ||
+ (key == *name2 && target == *map2));
+ }
- DCHECK(transitions->IsSortedNoDuplicates());
+ DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
}
@@ -160,9 +149,7 @@ TEST(TransitionArray_DifferentFieldNames) {
PropertyAttributes attributes = NONE;
Handle<Map> map0 = Map::Create(isolate, 0);
- CHECK(!map0->HasTransitionArray());
- Handle<TransitionArray> transitions = TransitionArray::Allocate(isolate, 0);
- CHECK(transitions->IsFullTransitionArray());
+ CHECK(map0->raw_transitions()->IsSmi());
for (int i = 0; i < PROPS_COUNT; i++) {
EmbeddedVector<char, 64> buffer;
@@ -175,17 +162,25 @@ TEST(TransitionArray_DifferentFieldNames) {
names[i] = name;
maps[i] = map;
- transitions = transitions->Insert(map0, name, map, PROPERTY_TRANSITION);
- ConnectTransition(map0, transitions, map);
+ TransitionArray::Insert(map0, name, map, PROPERTY_TRANSITION);
}
for (int i = 0; i < PROPS_COUNT; i++) {
- int transition = transitions->Search(kData, *names[i], attributes);
- CHECK_EQ(*names[i], transitions->GetKey(transition));
- CHECK_EQ(*maps[i], transitions->GetTarget(transition));
+ CHECK_EQ(*maps[i], TransitionArray::SearchTransition(
+ *map0, kData, *names[i], attributes));
+ }
+ for (int i = 0; i < PROPS_COUNT; i++) {
+ Name* key = TransitionArray::GetKey(map0->raw_transitions(), i);
+ Map* target = TransitionArray::GetTarget(map0->raw_transitions(), i);
+ for (int j = 0; j < PROPS_COUNT; j++) {
+ if (*names[i] == key) {
+ CHECK_EQ(*maps[i], target);
+ break;
+ }
+ }
}
- DCHECK(transitions->IsSortedNoDuplicates());
+ DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
}
@@ -196,9 +191,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributesSimple) {
Factory* factory = isolate->factory();
Handle<Map> map0 = Map::Create(isolate, 0);
- CHECK(!map0->HasTransitionArray());
- Handle<TransitionArray> transitions = TransitionArray::Allocate(isolate, 0);
- CHECK(transitions->IsFullTransitionArray());
+ CHECK(map0->raw_transitions()->IsSmi());
const int ATTRS_COUNT = (READ_ONLY | DONT_ENUM | DONT_DELETE) + 1;
STATIC_ASSERT(ATTRS_COUNT == 8);
@@ -215,20 +208,20 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributesSimple) {
OMIT_TRANSITION).ToHandleChecked();
attr_maps[i] = map;
- transitions = transitions->Insert(map0, name, map, PROPERTY_TRANSITION);
- ConnectTransition(map0, transitions, map);
+ TransitionArray::Insert(map0, name, map, PROPERTY_TRANSITION);
}
// Ensure that transitions for |name| field are valid.
for (int i = 0; i < ATTRS_COUNT; i++) {
PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
-
- int transition = transitions->Search(kData, *name, attributes);
- CHECK_EQ(*name, transitions->GetKey(transition));
- CHECK_EQ(*attr_maps[i], transitions->GetTarget(transition));
+ CHECK_EQ(*attr_maps[i], TransitionArray::SearchTransition(
+ *map0, kData, *name, attributes));
+ // All transitions use the same key, so this check doesn't need to
+ // care about ordering.
+ CHECK_EQ(*name, TransitionArray::GetKey(map0->raw_transitions(), i));
}
- DCHECK(transitions->IsSortedNoDuplicates());
+ DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
}
@@ -243,9 +236,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
Handle<Map> maps[PROPS_COUNT];
Handle<Map> map0 = Map::Create(isolate, 0);
- CHECK(!map0->HasTransitionArray());
- Handle<TransitionArray> transitions = TransitionArray::Allocate(isolate, 0);
- CHECK(transitions->IsFullTransitionArray());
+ CHECK(map0->raw_transitions()->IsSmi());
// Some number of fields.
for (int i = 0; i < PROPS_COUNT; i++) {
@@ -259,8 +250,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
names[i] = name;
maps[i] = map;
- transitions = transitions->Insert(map0, name, map, PROPERTY_TRANSITION);
- ConnectTransition(map0, transitions, map);
+ TransitionArray::Insert(map0, name, map, PROPERTY_TRANSITION);
}
const int ATTRS_COUNT = (READ_ONLY | DONT_ENUM | DONT_DELETE) + 1;
@@ -278,25 +268,36 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
OMIT_TRANSITION).ToHandleChecked();
attr_maps[i] = map;
- transitions = transitions->Insert(map0, name, map, PROPERTY_TRANSITION);
- ConnectTransition(map0, transitions, map);
+ TransitionArray::Insert(map0, name, map, PROPERTY_TRANSITION);
}
// Ensure that transitions for |name| field are valid.
for (int i = 0; i < ATTRS_COUNT; i++) {
- PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
-
- int transition = transitions->Search(kData, *name, attributes);
- CHECK_EQ(*name, transitions->GetKey(transition));
- CHECK_EQ(*attr_maps[i], transitions->GetTarget(transition));
+ PropertyAttributes attr = static_cast<PropertyAttributes>(i);
+ CHECK_EQ(*attr_maps[i],
+ TransitionArray::SearchTransition(*map0, kData, *name, attr));
}
   // Ensure that the info about the other fields is still valid.
- for (int i = 0; i < PROPS_COUNT; i++) {
- int transition = transitions->Search(kData, *names[i], NONE);
- CHECK_EQ(*names[i], transitions->GetKey(transition));
- CHECK_EQ(*maps[i], transitions->GetTarget(transition));
+ CHECK_EQ(PROPS_COUNT + ATTRS_COUNT,
+ TransitionArray::NumberOfTransitions(map0->raw_transitions()));
+ for (int i = 0; i < PROPS_COUNT + ATTRS_COUNT; i++) {
+ Name* key = TransitionArray::GetKey(map0->raw_transitions(), i);
+ Map* target = TransitionArray::GetTarget(map0->raw_transitions(), i);
+ if (key == *name) {
+ // Attributes transition.
+ PropertyAttributes attributes =
+ target->GetLastDescriptorDetails().attributes();
+ CHECK_EQ(*attr_maps[static_cast<int>(attributes)], target);
+ } else {
+ for (int j = 0; j < PROPS_COUNT; j++) {
+ if (*names[j] == key) {
+ CHECK_EQ(*maps[j], target);
+ break;
+ }
+ }
+ }
}
- DCHECK(transitions->IsSortedNoDuplicates());
+ DCHECK(TransitionArray::IsSortedNoDuplicates(*map0));
}
diff --git a/deps/v8/test/cctest/test-typedarrays.cc b/deps/v8/test/cctest/test-typedarrays.cc
new file mode 100644
index 0000000000..d031048cae
--- /dev/null
+++ b/deps/v8/test/cctest/test-typedarrays.cc
@@ -0,0 +1,81 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/api.h"
+#include "src/heap/heap.h"
+#include "src/objects.h"
+#include "src/v8.h"
+
+using namespace v8::internal;
+
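+// Copies the contents of the global ArrayBufferView |a| via CopyContents and
+// checks that it holds the bytes 0..3; |should_use_buffer| states whether the
+// view is expected to have a backing ArrayBuffer before and after the copy.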
+void TestArrayBufferViewContents(LocalContext& env, bool should_use_buffer) {
+ v8::Local<v8::Object> obj_a =
+ v8::Local<v8::Object>::Cast(env->Global()->Get(v8_str("a")));
+ CHECK(obj_a->IsArrayBufferView());
+ v8::Local<v8::ArrayBufferView> array_buffer_view =
+ v8::Local<v8::ArrayBufferView>::Cast(obj_a);
+ CHECK_EQ(array_buffer_view->HasBuffer(), should_use_buffer);
+ unsigned char contents[4] = {23, 23, 23, 23};
+ CHECK_EQ(sizeof(contents),
+ array_buffer_view->CopyContents(contents, sizeof(contents)));
+ CHECK_EQ(array_buffer_view->HasBuffer(), should_use_buffer);
+ for (size_t i = 0; i < sizeof(contents); ++i) {
+ CHECK_EQ(i, contents[i]);
+ }
+}
+
+
+TEST(CopyContentsTypedArray) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var a = new Uint8Array(4);"
+ "a[0] = 0;"
+ "a[1] = 1;"
+ "a[2] = 2;"
+ "a[3] = 3;");
+ TestArrayBufferViewContents(env, false);
+}
+
+
+TEST(CopyContentsArray) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun("var a = new Uint8Array([0, 1, 2, 3]);");
+ TestArrayBufferViewContents(env, true);
+}
+
+
+TEST(CopyContentsView) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var b = new ArrayBuffer(6);"
+ "var c = new Uint8Array(b);"
+ "c[0] = -1;"
+ "c[1] = -1;"
+ "c[2] = 0;"
+ "c[3] = 1;"
+ "c[4] = 2;"
+ "c[5] = 3;"
+ "var a = new DataView(b, 2);");
+ TestArrayBufferViewContents(env, true);
+}
+
+
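+// With v8::ArrayBufferCreationMode::kInternalized the isolate takes over the
+// passed-in memory, so the resulting buffer must not be considered external.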
+TEST(AllocateNotExternal) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ void* memory = V8::ArrayBufferAllocator()->Allocate(1024);
+ v8::Local<v8::ArrayBuffer> buffer =
+ v8::ArrayBuffer::New(env->GetIsolate(), memory, 1024,
+ v8::ArrayBufferCreationMode::kInternalized);
+ CHECK(!buffer->IsExternal());
+ CHECK_EQ(memory, buffer->GetContents().Data());
+}
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index fdcac3af35..05c13e5776 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -18,7 +18,7 @@
using namespace v8::base;
using namespace v8::internal;
-#if (V8_DOUBLE_FIELDS_UNBOXING)
+#if V8_DOUBLE_FIELDS_UNBOXING
//
@@ -30,7 +30,7 @@ static void InitializeVerifiedMapDescriptors(
Map* map, DescriptorArray* descriptors,
LayoutDescriptor* layout_descriptor) {
map->InitializeDescriptors(descriptors, layout_descriptor);
- CHECK(layout_descriptor->IsConsistentWithMap(map));
+ CHECK(layout_descriptor->IsConsistentWithMap(map, true));
}
@@ -48,6 +48,12 @@ static Handle<String> MakeName(const char* str, int suffix) {
}
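+// Returns the named global property as an internal JSObject handle.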
+Handle<JSObject> GetObject(const char* name) {
+ return v8::Utils::OpenHandle(
+ *v8::Handle<v8::Object>::Cast(CcTest::global()->Get(v8_str(name))));
+}
+
+
static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
if (obj->IsUnboxedDoubleField(field_index)) {
return obj->RawFastDoublePropertyAt(field_index);
@@ -224,7 +230,7 @@ TEST(LayoutDescriptorBasicSlow) {
}
CHECK(layout_desc->IsSlowLayout());
CHECK(!layout_desc->IsFastPointerLayout());
- CHECK(layout_descriptor->IsConsistentWithMap(*map));
+ CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
}
}
@@ -638,7 +644,7 @@ static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
map->InitializeDescriptors(*descriptors, *layout_descriptor);
}
Handle<LayoutDescriptor> layout_descriptor(map->layout_descriptor(), isolate);
- CHECK(layout_descriptor->IsConsistentWithMap(*map));
+ CHECK(layout_descriptor->IsConsistentWithMap(*map, true));
return layout_descriptor;
}
@@ -907,42 +913,126 @@ TEST(Regress436816) {
}
+TEST(DescriptorArrayTrimming) {
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+
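+  // The plan: let longer transition chains share |map|'s descriptor array
+  // and slow layout descriptor, then check that a full GC trims the unused
+  // tails of both back to what |map| actually owns.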
+ const int kFieldCount = 128;
+ const int kSplitFieldIndex = 32;
+ const int kTrimmedLayoutDescriptorLength = 64;
+
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<Map> map = Map::Create(isolate, kFieldCount);
+ for (int i = 0; i < kSplitFieldIndex; i++) {
+ map = Map::CopyWithField(map, MakeName("prop", i), any_type, NONE,
+ Representation::Smi(),
+ INSERT_TRANSITION).ToHandleChecked();
+ }
+ map = Map::CopyWithField(map, MakeName("dbl", kSplitFieldIndex), any_type,
+ NONE, Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+ CHECK(map->layout_descriptor()->IsConsistentWithMap(*map, true));
+ CHECK(map->layout_descriptor()->IsSlowLayout());
+ CHECK(map->owns_descriptors());
+ CHECK_EQ(2, map->layout_descriptor()->length());
+
+ {
+ // Add transitions to double fields.
+ v8::HandleScope scope(CcTest::isolate());
+
+ Handle<Map> tmp_map = map;
+ for (int i = kSplitFieldIndex + 1; i < kFieldCount; i++) {
+ tmp_map = Map::CopyWithField(tmp_map, MakeName("dbl", i), any_type, NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+ CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
+ }
+ // Check that descriptors are shared.
+ CHECK(tmp_map->owns_descriptors());
+ CHECK_EQ(map->instance_descriptors(), tmp_map->instance_descriptors());
+ CHECK_EQ(map->layout_descriptor(), tmp_map->layout_descriptor());
+ }
+ CHECK(map->layout_descriptor()->IsSlowLayout());
+ CHECK_EQ(4, map->layout_descriptor()->length());
+
+  // The unused tail of the layout descriptor is now "dirty" because of
+  // sharing.
+ CHECK(map->layout_descriptor()->IsConsistentWithMap(*map));
+ for (int i = kSplitFieldIndex + 1; i < kTrimmedLayoutDescriptorLength; i++) {
+ CHECK(!map->layout_descriptor()->IsTagged(i));
+ }
+ CHECK_LT(map->NumberOfOwnDescriptors(),
+ map->instance_descriptors()->number_of_descriptors());
+
+  // Trigger a GC that should trim both |map|'s descriptor array and its
+  // layout descriptor.
+ CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+
+ // The unused tail of the layout descriptor is now "clean" again.
+ CHECK(map->layout_descriptor()->IsConsistentWithMap(*map, true));
+ CHECK(map->owns_descriptors());
+ CHECK_EQ(map->NumberOfOwnDescriptors(),
+ map->instance_descriptors()->number_of_descriptors());
+ CHECK(map->layout_descriptor()->IsSlowLayout());
+ CHECK_EQ(2, map->layout_descriptor()->length());
+
+ {
+ // Add transitions to tagged fields.
+ v8::HandleScope scope(CcTest::isolate());
+
+ Handle<Map> tmp_map = map;
+ for (int i = kSplitFieldIndex + 1; i < kFieldCount - 1; i++) {
+ tmp_map = Map::CopyWithField(tmp_map, MakeName("tagged", i), any_type,
+ NONE, Representation::Tagged(),
+ INSERT_TRANSITION).ToHandleChecked();
+ CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
+ }
+ tmp_map = Map::CopyWithField(tmp_map, MakeString("dbl"), any_type, NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+ CHECK(tmp_map->layout_descriptor()->IsConsistentWithMap(*tmp_map, true));
+ // Check that descriptors are shared.
+ CHECK(tmp_map->owns_descriptors());
+ CHECK_EQ(map->instance_descriptors(), tmp_map->instance_descriptors());
+ }
+ CHECK(map->layout_descriptor()->IsSlowLayout());
+}
+
+
TEST(DoScavenge) {
CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
- v8::HandleScope scope(CcTest::isolate());
- CompileRun(
- "function A() {"
- " this.x = 42.5;"
- " this.o = {};"
- "};"
- "var o = new A();");
+  // The plan: create |obj| with a double field in new space, do a scavenge
+  // so that |obj| is moved into the survivor space, and construct a double
+  // value that looks like a pointer into the from semi-space. Do one more
+  // scavenge and ensure that it didn't crash or corrupt the double value
+  // stored in the object.
- Handle<String> obj_name = factory->InternalizeUtf8String("o");
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<Map> map = Map::Create(isolate, 10);
+ map = Map::CopyWithField(map, MakeName("prop", 0), any_type, NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
- Handle<Object> obj_value =
- Object::GetProperty(isolate->global_object(), obj_name).ToHandleChecked();
- CHECK(obj_value->IsJSObject());
- Handle<JSObject> obj = Handle<JSObject>::cast(obj_value);
+ // Create object in new space.
+ Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED, false);
+
+ Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
+ obj->WriteToField(0, *heap_number);
{
// Ensure the object is properly set up.
- Map* map = obj->map();
- DescriptorArray* descriptors = map->instance_descriptors();
- CHECK(map->NumberOfOwnDescriptors() == 2);
- CHECK(descriptors->GetDetails(0).representation().IsDouble());
- CHECK(descriptors->GetDetails(1).representation().IsHeapObject());
- FieldIndex field_index = FieldIndex::ForDescriptor(map, 0);
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, 0);
CHECK(field_index.is_inobject() && field_index.is_double());
CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
}
CHECK(isolate->heap()->new_space()->Contains(*obj));
- // Trigger GCs so that the newly allocated object moves to old gen.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in survivor space now
+  // Do a scavenge so that |obj| is moved to the survivor space.
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE);
// Create temp object in the new space.
Handle<JSArray> temp = factory->NewJSArray(FAST_ELEMENTS, NOT_TENURED);
@@ -957,9 +1047,9 @@ TEST(DoScavenge) {
Handle<HeapNumber> boom_number = factory->NewHeapNumber(boom_value, MUTABLE);
obj->FastPropertyAtPut(field_index, *boom_number);
- // Now the object moves to old gen and it has a double field that looks like
+  // Now |obj| moves to old gen and it has a double field that looks like
   // a pointer into the from semi-space.
- CcTest::heap()->CollectGarbage(i::NEW_SPACE); // in old gen now
+ CcTest::heap()->CollectGarbage(i::NEW_SPACE, "boom");
CHECK(isolate->heap()->old_pointer_space()->Contains(*obj));
@@ -967,6 +1057,96 @@ TEST(DoScavenge) {
}
+TEST(DoScavengeWithIncrementalWriteBarrier) {
+ if (FLAG_never_compact || !FLAG_incremental_marking) return;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = CcTest::heap();
+ PagedSpace* old_pointer_space = heap->old_pointer_space();
+
+  // The plan: create |obj_value| in old space and ensure that it is
+  // allocated on an evacuation candidate page, create |obj| with double and
+  // tagged fields in new space and write |obj_value| to the tagged field of
+  // |obj|, do two scavenges to promote |obj| to old space, then run a GC in
+  // old space and ensure that the tagged value was properly updated after
+  // the candidates' evacuation.
+
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<Map> map = Map::Create(isolate, 10);
+ map = Map::CopyWithField(map, MakeName("prop", 0), any_type, NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+ map = Map::CopyWithField(map, MakeName("prop", 1), any_type, NONE,
+ Representation::Tagged(),
+ INSERT_TRANSITION).ToHandleChecked();
+
+ // Create |obj_value| in old space.
+ Handle<HeapObject> obj_value;
+ Page* ec_page;
+ {
+ AlwaysAllocateScope always_allocate(isolate);
+ // Make sure |obj_value| is placed on an old-space evacuation candidate.
+ SimulateFullSpace(old_pointer_space);
+ obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
+ ec_page = Page::FromAddress(obj_value->address());
+ }
+
+ // Create object in new space.
+ Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED, false);
+
+ Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
+ obj->WriteToField(0, *heap_number);
+ obj->WriteToField(1, *obj_value);
+
+ {
+ // Ensure the object is properly set up.
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, 0);
+ CHECK(field_index.is_inobject() && field_index.is_double());
+ CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
+ CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
+
+ field_index = FieldIndex::ForDescriptor(*map, 1);
+ CHECK(field_index.is_inobject() && !field_index.is_double());
+ CHECK(!map->IsUnboxedDoubleField(field_index));
+ }
+ CHECK(isolate->heap()->new_space()->Contains(*obj));
+
+ // Heap is ready, force |ec_page| to become an evacuation candidate and
+ // simulate incremental marking.
+ FLAG_stress_compaction = true;
+ FLAG_manual_evacuation_candidates_selection = true;
+ ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ SimulateIncrementalMarking(heap);
+  // Disable stress compaction mode in order to let the GC do a scavenge.
+ FLAG_stress_compaction = false;
+
+  // Check that everything is ready for triggering the incremental write
+  // barrier during scavenge (i.e. that |obj| is black, incremental marking
+  // is in compacting mode, and |obj_value|'s page is an evacuation
+  // candidate).
+ IncrementalMarking* marking = heap->incremental_marking();
+ CHECK(marking->IsCompacting());
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj)));
+ CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
+
+ // Trigger GCs so that |obj| moves to old gen.
+ heap->CollectGarbage(i::NEW_SPACE); // in survivor space now
+ heap->CollectGarbage(i::NEW_SPACE); // in old gen now
+
+ CHECK(isolate->heap()->old_pointer_space()->Contains(*obj));
+ CHECK(isolate->heap()->old_pointer_space()->Contains(*obj_value));
+ CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
+
+ heap->CollectGarbage(i::OLD_POINTER_SPACE, "boom");
+
+ // |obj_value| must be evacuated.
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
+
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, 1);
+ CHECK_EQ(*obj_value, obj->RawFastPropertyAt(field_index));
+}
+
+
static void TestLayoutDescriptorHelper(Isolate* isolate,
int inobject_properties,
Handle<DescriptorArray> descriptors,
@@ -1131,7 +1311,7 @@ TEST(LayoutDescriptorSharing) {
}
Handle<LayoutDescriptor> split_layout_descriptor(
split_map->layout_descriptor(), isolate);
- CHECK(split_layout_descriptor->IsConsistentWithMap(*split_map));
+ CHECK(split_layout_descriptor->IsConsistentWithMap(*split_map, true));
CHECK(split_layout_descriptor->IsSlowLayout());
CHECK(split_map->owns_descriptors());
@@ -1144,7 +1324,7 @@ TEST(LayoutDescriptorSharing) {
// Layout descriptors should be shared with |split_map|.
CHECK(map1->owns_descriptors());
CHECK_EQ(*split_layout_descriptor, map1->layout_descriptor());
- CHECK(map1->layout_descriptor()->IsConsistentWithMap(*map1));
+ CHECK(map1->layout_descriptor()->IsConsistentWithMap(*map1, true));
Handle<Map> map2 = Map::CopyWithField(split_map, MakeString("bar"), any_type,
NONE, Representation::Tagged(),
@@ -1153,7 +1333,7 @@ TEST(LayoutDescriptorSharing) {
// Layout descriptors should not be shared with |split_map|.
CHECK(map2->owns_descriptors());
CHECK_NE(*split_layout_descriptor, map2->layout_descriptor());
- CHECK(map2->layout_descriptor()->IsConsistentWithMap(*map2));
+ CHECK(map2->layout_descriptor()->IsConsistentWithMap(*map2, true));
}
@@ -1163,28 +1343,23 @@ TEST(StoreBufferScanOnScavenge) {
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
- CompileRun(
- "function A() {"
- " this.x = 42.5;"
- " this.o = {};"
- "};"
- "var o = new A();");
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+ Handle<Map> map = Map::Create(isolate, 10);
+ map = Map::CopyWithField(map, MakeName("prop", 0), any_type, NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
- Handle<String> obj_name = factory->InternalizeUtf8String("o");
+ // Create object in new space.
+ Handle<JSObject> obj = factory->NewJSObjectFromMap(map, NOT_TENURED, false);
- Handle<Object> obj_value =
- Object::GetProperty(isolate->global_object(), obj_name).ToHandleChecked();
- CHECK(obj_value->IsJSObject());
- Handle<JSObject> obj = Handle<JSObject>::cast(obj_value);
+ Handle<HeapNumber> heap_number = factory->NewHeapNumber(42.5);
+ obj->WriteToField(0, *heap_number);
{
// Ensure the object is properly set up.
- Map* map = obj->map();
DescriptorArray* descriptors = map->instance_descriptors();
- CHECK(map->NumberOfOwnDescriptors() == 2);
CHECK(descriptors->GetDetails(0).representation().IsDouble());
- CHECK(descriptors->GetDetails(1).representation().IsHeapObject());
- FieldIndex field_index = FieldIndex::ForDescriptor(map, 0);
+ FieldIndex field_index = FieldIndex::ForDescriptor(*map, 0);
CHECK(field_index.is_inobject() && field_index.is_double());
CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
@@ -1305,4 +1480,211 @@ TEST(WriteBarriersInCopyJSObject) {
CHECK_EQ(boom_value, clone->RawFastDoublePropertyAt(index));
}
+
+static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
+ int tagged_descriptor, int double_descriptor,
+ bool check_tagged_value = true) {
+ FLAG_stress_compaction = true;
+ FLAG_manual_evacuation_candidates_selection = true;
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = CcTest::heap();
+ PagedSpace* old_pointer_space = heap->old_pointer_space();
+
+  // The plan: create |obj| from |map| in old space, create |obj_value| in
+  // new space, and ensure that the write barrier is triggered when
+  // |obj_value| is written to property |tagged_descriptor| of |obj|. Then
+  // migrate the object to |new_map| and set a proper value for property
+  // |double_descriptor|. Trigger a GC and ensure that it did not crash while
+  // updating the store buffer entries.
+
+ Handle<JSObject> obj;
+ Handle<HeapObject> obj_value;
+ {
+ AlwaysAllocateScope always_allocate(isolate);
+ obj = factory->NewJSObjectFromMap(map, TENURED, false);
+ CHECK(old_pointer_space->Contains(*obj));
+
+ obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS);
+ }
+
+ CHECK(heap->InNewSpace(*obj_value));
+
+ {
+ FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
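+    // Store the new-space value repeatedly so the write barrier records the
+    // slot in the store buffer.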
+ const int n = 153;
+ for (int i = 0; i < n; i++) {
+ obj->FastPropertyAtPut(index, *obj_value);
+ }
+ }
+
+  // Migrate |obj| to |new_map|, which shifts the fields, and put
+  // |boom_value| into the slot that was recorded earlier by the write
+  // barrier.
+ JSObject::MigrateToMap(obj, new_map);
+
+ Address fake_object = reinterpret_cast<Address>(*obj_value) + kPointerSize;
+ double boom_value = bit_cast<double>(fake_object);
+
+ FieldIndex double_field_index =
+ FieldIndex::ForDescriptor(*new_map, double_descriptor);
+ CHECK(obj->IsUnboxedDoubleField(double_field_index));
+ obj->RawFastDoublePropertyAtPut(double_field_index, boom_value);
+
+ // Trigger GC to evacuate all candidates.
+ CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");
+
+ if (check_tagged_value) {
+ FieldIndex tagged_field_index =
+ FieldIndex::ForDescriptor(*new_map, tagged_descriptor);
+ CHECK_EQ(*obj_value, obj->RawFastPropertyAt(tagged_field_index));
+ }
+ CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
+}
+
+
+static void TestIncrementalWriteBarrier(Handle<Map> map, Handle<Map> new_map,
+ int tagged_descriptor,
+ int double_descriptor,
+ bool check_tagged_value = true) {
+ if (FLAG_never_compact || !FLAG_incremental_marking) return;
+ FLAG_stress_compaction = true;
+ FLAG_manual_evacuation_candidates_selection = true;
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = CcTest::heap();
+ PagedSpace* old_pointer_space = heap->old_pointer_space();
+
+  // The plan: create |obj| from |map| in old space, create |obj_value| in
+  // old space and ensure it ends up on an evacuation candidate page. Start
+  // incremental marking and ensure that the incremental write barrier is
+  // triggered when |obj_value| is written to property |tagged_descriptor| of
+  // |obj|. Then migrate the object to |new_map| and set a proper value for
+  // property |double_descriptor|. Trigger a GC and ensure that it did not
+  // crash while updating the slots buffer entries.
+
+ Handle<JSObject> obj;
+ Handle<HeapObject> obj_value;
+ Page* ec_page;
+ {
+ AlwaysAllocateScope always_allocate(isolate);
+ obj = factory->NewJSObjectFromMap(map, TENURED, false);
+ CHECK(old_pointer_space->Contains(*obj));
+
+ // Make sure |obj_value| is placed on an old-space evacuation candidate.
+ SimulateFullSpace(old_pointer_space);
+ obj_value = factory->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
+ ec_page = Page::FromAddress(obj_value->address());
+ CHECK_NE(ec_page, Page::FromAddress(obj->address()));
+ }
+
+ // Heap is ready, force |ec_page| to become an evacuation candidate and
+ // simulate incremental marking.
+ ec_page->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ SimulateIncrementalMarking(heap);
+
+  // Check that everything is ready for triggering the incremental write
+  // barrier (i.e. that both |obj| and |obj_value| are black, the marking
+  // phase is still active, and |obj_value|'s page is indeed an evacuation
+  // candidate).
+ IncrementalMarking* marking = heap->incremental_marking();
+ CHECK(marking->IsMarking());
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj)));
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(*obj_value)));
+ CHECK(MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
+
+  // Trigger the incremental write barrier, which should add a slot to
+  // |ec_page|'s slots buffer.
+ {
+ int slots_buffer_len = SlotsBuffer::SizeOfChain(ec_page->slots_buffer());
+ FieldIndex index = FieldIndex::ForDescriptor(*map, tagged_descriptor);
+ const int n = SlotsBuffer::kNumberOfElements + 10;
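+ // Overflow a single SlotsBuffer so that the buffer chain must grow; the
+ // check below verifies that every store recorded a slot.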
+ for (int i = 0; i < n; i++) {
+ obj->FastPropertyAtPut(index, *obj_value);
+ }
+ // Ensure that the slots were actually added to |ec_page|'s slots buffer.
+ CHECK_EQ(slots_buffer_len + n,
+ SlotsBuffer::SizeOfChain(ec_page->slots_buffer()));
+ }
+
+ // Migrate |obj| to |new_map|, which should shift fields and put
+ // |boom_value| into the slot that was earlier recorded by the incremental
+ // write barrier.
+ JSObject::MigrateToMap(obj, new_map);
+
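+ // A recognizable garbage bit pattern: if the stale slot were still updated
+ // as a tagged pointer, the GC would crash dereferencing it.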
+ double boom_value = bit_cast<double>(UINT64_C(0xbaad0176a37c28e1));
+
+ FieldIndex double_field_index =
+ FieldIndex::ForDescriptor(*new_map, double_descriptor);
+ CHECK(obj->IsUnboxedDoubleField(double_field_index));
+ obj->RawFastDoublePropertyAtPut(double_field_index, boom_value);
+
+ // Trigger GC to evacuate all candidates.
+ CcTest::heap()->CollectGarbage(OLD_POINTER_SPACE, "boom");
+
+ // Ensure that the values are still there and correct.
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(*obj_value));
+
+ if (check_tagged_value) {
+ FieldIndex tagged_field_index =
+ FieldIndex::ForDescriptor(*new_map, tagged_descriptor);
+ CHECK_EQ(*obj_value, obj->RawFastPropertyAt(tagged_field_index));
+ }
+ CHECK_EQ(boom_value, obj->RawFastDoublePropertyAt(double_field_index));
+}
+
+
+enum WriteBarrierKind { OLD_TO_OLD_WRITE_BARRIER, OLD_TO_NEW_WRITE_BARRIER };
+static void TestWriteBarrierObjectShiftFieldsRight(
+ WriteBarrierKind write_barrier_kind) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ v8::HandleScope scope(CcTest::isolate());
+
+ Handle<HeapType> any_type = HeapType::Any(isolate);
+
+ CompileRun("function func() { return 1; }");
+
+ Handle<JSObject> func = GetObject("func");
+
+ Handle<Map> map = Map::Create(isolate, 10);
+ map = Map::CopyWithConstant(map, MakeName("prop", 0), func, NONE,
+ INSERT_TRANSITION).ToHandleChecked();
+ map = Map::CopyWithField(map, MakeName("prop", 1), any_type, NONE,
+ Representation::Double(),
+ INSERT_TRANSITION).ToHandleChecked();
+ map = Map::CopyWithField(map, MakeName("prop", 2), any_type, NONE,
+ Representation::Tagged(),
+ INSERT_TRANSITION).ToHandleChecked();
+
+ // Shift fields right by turning constant property to a field.
+ Handle<Map> new_map = Map::ReconfigureProperty(
+ map, 0, kData, NONE, Representation::Tagged(), any_type, FORCE_FIELD);
+
+ if (write_barrier_kind == OLD_TO_NEW_WRITE_BARRIER) {
+ TestWriteBarrier(map, new_map, 2, 1);
+ } else {
+ CHECK_EQ(OLD_TO_OLD_WRITE_BARRIER, write_barrier_kind);
+ TestIncrementalWriteBarrier(map, new_map, 2, 1);
+ }
+}
+
+
+// TODO(ishell): enable when this issue is fixed.
+DISABLED_TEST(WriteBarrierObjectShiftFieldsRight) {
+ TestWriteBarrierObjectShiftFieldsRight(OLD_TO_NEW_WRITE_BARRIER);
+}
+
+
+TEST(IncrementalWriteBarrierObjectShiftFieldsRight) {
+ TestWriteBarrierObjectShiftFieldsRight(OLD_TO_OLD_WRITE_BARRIER);
+}
+
+
+// TODO(ishell): add respective tests for property kind reconfiguring from
+// accessor field to double, once accessor fields are supported by
+// Map::ReconfigureProperty().
+
+
+// TODO(ishell): add respective tests for fast property removal case once
+// Map::ReconfigureProperty() supports that.
+
#endif
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 04f41b9aee..dfe3f453c5 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -30,7 +30,6 @@
#include "src/v8.h"
#include "src/global-handles.h"
-#include "src/snapshot.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -184,7 +183,8 @@ TEST(Regress2060a) {
// Start second old-space page so that values land on evacuation candidate.
Page* first_page = heap->old_pointer_space()->anchor()->next_page();
- factory->NewFixedArray(900 * KB / kPointerSize, TENURED);
+ int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
+ factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
// Fill up weak map with values on an evacuation candidate.
{
@@ -222,7 +222,8 @@ TEST(Regress2060b) {
// Start second old-space page so that keys land on evacuation candidate.
Page* first_page = heap->old_pointer_space()->anchor()->next_page();
- factory->NewFixedArray(900 * KB / kPointerSize, TENURED);
+ int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
+ factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
// Fill up weak map with keys on an evacuation candidate.
Handle<JSObject> keys[32];
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index f08a99bcbf..1ab9f10989 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -30,7 +30,6 @@
#include "src/v8.h"
#include "src/global-handles.h"
-#include "src/snapshot.h"
#include "test/cctest/cctest.h"
using namespace v8::internal;
@@ -184,7 +183,8 @@ TEST(WeakSet_Regress2060a) {
// Start second old-space page so that values land on evacuation candidate.
Page* first_page = heap->old_pointer_space()->anchor()->next_page();
- factory->NewFixedArray(900 * KB / kPointerSize, TENURED);
+ int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
+ factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
// Fill up weak set with values on an evacuation candidate.
{
@@ -222,7 +222,8 @@ TEST(WeakSet_Regress2060b) {
// Start second old-space page so that keys land on evacuation candidate.
Page* first_page = heap->old_pointer_space()->anchor()->next_page();
- factory->NewFixedArray(900 * KB / kPointerSize, TENURED);
+ int dummy_array_size = Page::kMaxRegularHeapObjectSize - 92 * KB;
+ factory->NewFixedArray(dummy_array_size / kPointerSize, TENURED);
// Fill up weak set with keys on an evacuation candidate.
Handle<JSObject> keys[32];
diff --git a/deps/v8/test/cctest/test-weaktypedarrays.cc b/deps/v8/test/cctest/test-weaktypedarrays.cc
index d40b7e95a9..c1f59de45a 100644
--- a/deps/v8/test/cctest/test-weaktypedarrays.cc
+++ b/deps/v8/test/cctest/test-weaktypedarrays.cc
@@ -62,7 +62,20 @@ static bool HasArrayBufferInWeakList(Heap* heap, JSArrayBuffer* ab) {
}
-static int CountViews(JSArrayBuffer* array_buffer) {
+static int CountViewsInNewSpaceList(Heap* heap, JSArrayBuffer* array_buffer) {
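+ // Views of new-space array buffers are tracked on a single per-heap weak
+ // list rather than on the buffer itself, so count matching entries there.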
+ int count = 0;
+ for (Object* o = heap->new_array_buffer_views_list(); !o->IsUndefined();) {
+ JSArrayBufferView* view = JSArrayBufferView::cast(o);
+ if (array_buffer == view->buffer()) {
+ count++;
+ }
+ o = view->weak_next();
+ }
+ return count;
+}
+
+
+static int CountViews(Heap* heap, JSArrayBuffer* array_buffer) {
int count = 0;
for (Object* o = array_buffer->weak_first_view();
!o->IsUndefined();
@@ -70,17 +83,27 @@ static int CountViews(JSArrayBuffer* array_buffer) {
count++;
}
- return count;
+ return count + CountViewsInNewSpaceList(heap, array_buffer);
}
-static bool HasViewInWeakList(JSArrayBuffer* array_buffer,
+
+static bool HasViewInNewSpaceList(Heap* heap, JSArrayBufferView* ta) {
+ for (Object* o = heap->new_array_buffer_views_list(); !o->IsUndefined();
+ o = JSArrayBufferView::cast(o)->weak_next()) {
+ if (ta == o) return true;
+ }
+ return false;
+}
+
+
+static bool HasViewInWeakList(Heap* heap, JSArrayBuffer* array_buffer,
JSArrayBufferView* ta) {
for (Object* o = array_buffer->weak_first_view();
!o->IsUndefined();
o = JSArrayBufferView::cast(o)->weak_next()) {
if (ta == o) return true;
}
- return false;
+ return HasViewInNewSpaceList(heap, ta);
}
@@ -200,18 +223,18 @@ void TestViewFromApi() {
Handle<JSArrayBufferView> ita1 = v8::Utils::OpenHandle(*ta1);
Handle<JSArrayBufferView> ita2 = v8::Utils::OpenHandle(*ta2);
- CHECK_EQ(2, CountViews(*iab));
- CHECK(HasViewInWeakList(*iab, *ita1));
- CHECK(HasViewInWeakList(*iab, *ita2));
+ CHECK_EQ(2, CountViews(isolate->heap(), *iab));
+ CHECK(HasViewInWeakList(isolate->heap(), *iab, *ita1));
+ CHECK(HasViewInWeakList(isolate->heap(), *iab, *ita2));
}
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(1, CountViews(*iab));
+ CHECK_EQ(1, CountViews(isolate->heap(), *iab));
Handle<JSArrayBufferView> ita1 = v8::Utils::OpenHandle(*ta1);
- CHECK(HasViewInWeakList(*iab, *ita1));
+ CHECK(HasViewInWeakList(isolate->heap(), *iab, *ita1));
}
isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK_EQ(0, CountViews(*iab));
+ CHECK_EQ(0, CountViews(isolate->heap(), *iab));
}
@@ -299,10 +322,13 @@ static void TestTypedArrayFromScript(const char* constructor) {
v8::Handle<TypedArray>::Cast(CompileRun("ta3"));
CHECK_EQ(1, CountArrayBuffersInWeakList(isolate->heap()) - start);
Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
- CHECK_EQ(3, CountViews(*iab));
- CHECK(HasViewInWeakList(*iab, *v8::Utils::OpenHandle(*ta1)));
- CHECK(HasViewInWeakList(*iab, *v8::Utils::OpenHandle(*ta2)));
- CHECK(HasViewInWeakList(*iab, *v8::Utils::OpenHandle(*ta3)));
+ CHECK_EQ(3, CountViews(isolate->heap(), *iab));
+ CHECK(HasViewInWeakList(isolate->heap(), *iab,
+ *v8::Utils::OpenHandle(*ta1)));
+ CHECK(HasViewInWeakList(isolate->heap(), *iab,
+ *v8::Utils::OpenHandle(*ta2)));
+ CHECK(HasViewInWeakList(isolate->heap(), *iab,
+ *v8::Utils::OpenHandle(*ta3)));
}
i::SNPrintF(source, "ta%d = null;", i);
@@ -316,13 +342,14 @@ static void TestTypedArrayFromScript(const char* constructor) {
v8::Handle<v8::ArrayBuffer> ab =
v8::Handle<v8::ArrayBuffer>::Cast(CompileRun("ab"));
Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
- CHECK_EQ(2, CountViews(*iab));
+ CHECK_EQ(2, CountViews(isolate->heap(), *iab));
for (int j = 1; j <= 3; j++) {
if (j == i) continue;
i::SNPrintF(source, "ta%d", j);
v8::Handle<TypedArray> ta =
v8::Handle<TypedArray>::Cast(CompileRun(source.start()));
- CHECK(HasViewInWeakList(*iab, *v8::Utils::OpenHandle(*ta)));
+ CHECK(HasViewInWeakList(isolate->heap(), *iab,
+ *v8::Utils::OpenHandle(*ta)));
}
}
@@ -336,7 +363,7 @@ static void TestTypedArrayFromScript(const char* constructor) {
v8::Handle<v8::ArrayBuffer> ab =
v8::Handle<v8::ArrayBuffer>::Cast(CompileRun("ab"));
Handle<JSArrayBuffer> iab = v8::Utils::OpenHandle(*ab);
- CHECK_EQ(0, CountViews(*iab));
+ CHECK_EQ(0, CountViews(isolate->heap(), *iab));
}
}
}
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 0a99ad4d6a..a703642390 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -60,7 +60,6 @@
"path": ["Strings"],
"main": "run.js",
"resources": ["harmony-string.js"],
- "flags": ["--harmony-strings"],
"results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
"tests": [
{"name": "StringFunctions"}
@@ -71,7 +70,6 @@
"path": ["Templates"],
"main": "run.js",
"resources": ["templates.js"],
- "flags": ["--harmony-templates"],
"run_count": 5,
"units": "score",
"results_regexp": "^%s\\-Templates\\(Score\\): (.+)$",
diff --git a/deps/v8/test/mjsunit/harmony/disable-harmony-string.js b/deps/v8/test/message/class-constructor-accessor.js
index 0b88ae0be9..edc3c13169 100644
--- a/deps/v8/test/mjsunit/harmony/disable-harmony-string.js
+++ b/deps/v8/test/message/class-constructor-accessor.js
@@ -1,7 +1,10 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+'use strict';
-// Flags: --noharmony-strings
-
-assertEquals(undefined, String.prototype.includes);
+class C {
+ get constructor() {}
+}
diff --git a/deps/v8/test/message/class-constructor-accessor.out b/deps/v8/test/message/class-constructor-accessor.out
new file mode 100644
index 0000000000..8776f54db1
--- /dev/null
+++ b/deps/v8/test/message/class-constructor-accessor.out
@@ -0,0 +1,7 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: SyntaxError: Class constructor may not be an accessor
+ get constructor() {}
+ ^^^^^^^^^^^
+SyntaxError: Class constructor may not be an accessor
diff --git a/deps/v8/test/message/class-constructor-generator.js b/deps/v8/test/message/class-constructor-generator.js
new file mode 100644
index 0000000000..5d370f865e
--- /dev/null
+++ b/deps/v8/test/message/class-constructor-generator.js
@@ -0,0 +1,10 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+'use strict';
+
+class C {
+ *constructor() {}
+}
diff --git a/deps/v8/test/message/class-constructor-generator.out b/deps/v8/test/message/class-constructor-generator.out
new file mode 100644
index 0000000000..5075e511cc
--- /dev/null
+++ b/deps/v8/test/message/class-constructor-generator.out
@@ -0,0 +1,7 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: SyntaxError: Class constructor may not be a generator
+ *constructor() {}
+ ^^^^^^^^^^^
+SyntaxError: Class constructor may not be a generator
diff --git a/deps/v8/test/message/export-duplicate-as.js b/deps/v8/test/message/export-duplicate-as.js
new file mode 100644
index 0000000000..49b52d4b17
--- /dev/null
+++ b/deps/v8/test/message/export-duplicate-as.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+var a, b;
+export { a as c };
+export { a, b as c };
diff --git a/deps/v8/test/message/export-duplicate-as.out b/deps/v8/test/message/export-duplicate-as.out
new file mode 100644
index 0000000000..1726d9491a
--- /dev/null
+++ b/deps/v8/test/message/export-duplicate-as.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: SyntaxError: Duplicate export of 'c'
+export { a, b as c };
+ ^
+SyntaxError: Duplicate export of 'c'
diff --git a/deps/v8/test/message/export-duplicate-default.js b/deps/v8/test/message/export-duplicate-default.js
new file mode 100644
index 0000000000..72a54a45f4
--- /dev/null
+++ b/deps/v8/test/message/export-duplicate-default.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+export default function f() {};
+export default class C {};
diff --git a/deps/v8/test/message/export-duplicate-default.out b/deps/v8/test/message/export-duplicate-default.out
new file mode 100644
index 0000000000..4c6b97a7a1
--- /dev/null
+++ b/deps/v8/test/message/export-duplicate-default.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:8: SyntaxError: Duplicate export of 'default'
+export default class C {};
+ ^^^^^^^
+SyntaxError: Duplicate export of 'default'
diff --git a/deps/v8/test/message/export-duplicate.js b/deps/v8/test/message/export-duplicate.js
new file mode 100644
index 0000000000..f45aefe13f
--- /dev/null
+++ b/deps/v8/test/message/export-duplicate.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+var a, b;
+export { a };
+export { a, b };
diff --git a/deps/v8/test/message/export-duplicate.out b/deps/v8/test/message/export-duplicate.out
new file mode 100644
index 0000000000..e88779f580
--- /dev/null
+++ b/deps/v8/test/message/export-duplicate.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:9: SyntaxError: Duplicate export of 'a'
+export { a, b };
+ ^
+SyntaxError: Duplicate export of 'a'
diff --git a/deps/v8/test/message/import-as-eval.js b/deps/v8/test/message/import-as-eval.js
new file mode 100644
index 0000000000..66adc32cbe
--- /dev/null
+++ b/deps/v8/test/message/import-as-eval.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import { foo as eval } from "mod";
diff --git a/deps/v8/test/message/import-as-eval.out b/deps/v8/test/message/import-as-eval.out
new file mode 100644
index 0000000000..622f7fe9e1
--- /dev/null
+++ b/deps/v8/test/message/import-as-eval.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:7: SyntaxError: Unexpected eval or arguments in strict mode
+import { foo as eval } from "mod";
+ ^^^^
+SyntaxError: Unexpected eval or arguments in strict mode
diff --git a/deps/v8/test/message/import-as-redeclaration.js b/deps/v8/test/message/import-as-redeclaration.js
new file mode 100644
index 0000000000..43bf278d1b
--- /dev/null
+++ b/deps/v8/test/message/import-as-redeclaration.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+let foo = 42;
+import { bar as foo } from "mod";
diff --git a/deps/v8/test/message/import-as-redeclaration.out b/deps/v8/test/message/import-as-redeclaration.out
new file mode 100644
index 0000000000..51c4c032dc
--- /dev/null
+++ b/deps/v8/test/message/import-as-redeclaration.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:8: SyntaxError: Identifier 'foo' has already been declared
+import { bar as foo } from "mod";
+ ^^^
+SyntaxError: Identifier 'foo' has already been declared
diff --git a/deps/v8/test/message/import-as-reserved-word.js b/deps/v8/test/message/import-as-reserved-word.js
new file mode 100644
index 0000000000..562699d45f
--- /dev/null
+++ b/deps/v8/test/message/import-as-reserved-word.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import { foo as import } from "mod";
diff --git a/deps/v8/test/message/import-as-reserved-word.out b/deps/v8/test/message/import-as-reserved-word.out
new file mode 100644
index 0000000000..1ee8d41c1a
--- /dev/null
+++ b/deps/v8/test/message/import-as-reserved-word.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:7: SyntaxError: Unexpected reserved word
+import { foo as import } from "mod";
+ ^^^^^^
+SyntaxError: Unexpected reserved word
diff --git a/deps/v8/test/message/import-eval.js b/deps/v8/test/message/import-eval.js
new file mode 100644
index 0000000000..8ab35baef6
--- /dev/null
+++ b/deps/v8/test/message/import-eval.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import { eval } from "mod";
diff --git a/deps/v8/test/message/import-eval.out b/deps/v8/test/message/import-eval.out
new file mode 100644
index 0000000000..148662a28c
--- /dev/null
+++ b/deps/v8/test/message/import-eval.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:7: SyntaxError: Unexpected eval or arguments in strict mode
+import { eval } from "mod";
+ ^^^^
+SyntaxError: Unexpected eval or arguments in strict mode
diff --git a/deps/v8/test/message/import-redeclaration.js b/deps/v8/test/message/import-redeclaration.js
new file mode 100644
index 0000000000..27b0cdccef
--- /dev/null
+++ b/deps/v8/test/message/import-redeclaration.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+let foo = 42;
+import { foo } from "mod";
diff --git a/deps/v8/test/message/import-redeclaration.out b/deps/v8/test/message/import-redeclaration.out
new file mode 100644
index 0000000000..641948810f
--- /dev/null
+++ b/deps/v8/test/message/import-redeclaration.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:8: SyntaxError: Identifier 'foo' has already been declared
+import { foo } from "mod";
+ ^^^
+SyntaxError: Identifier 'foo' has already been declared
diff --git a/deps/v8/test/message/import-reserved-word.js b/deps/v8/test/message/import-reserved-word.js
new file mode 100644
index 0000000000..1fd7ba291e
--- /dev/null
+++ b/deps/v8/test/message/import-reserved-word.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// MODULE
+
+import { import } from "mod";
diff --git a/deps/v8/test/message/import-reserved-word.out b/deps/v8/test/message/import-reserved-word.out
new file mode 100644
index 0000000000..5b990e9e59
--- /dev/null
+++ b/deps/v8/test/message/import-reserved-word.out
@@ -0,0 +1,7 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:7: SyntaxError: Unexpected reserved word
+import { import } from "mod";
+ ^^^^^^
+SyntaxError: Unexpected reserved word
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 5d6ab84663..cfe22f15d7 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -36,6 +36,7 @@ from testrunner.objects import testcase
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
INVALID_FLAGS = ["--enable-slow-asserts"]
+MODULE_PATTERN = re.compile(r"^// MODULE$", flags=re.MULTILINE)
class MessageTestSuite(testsuite.TestSuite):
@@ -63,6 +64,8 @@ class MessageTestSuite(testsuite.TestSuite):
for match in flags_match:
result += match.strip().split()
result += context.mode_flags
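+ # Tests whose source contains a "// MODULE" marker line run as ES modules.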
+ if MODULE_PATTERN.search(source):
+ result.append("--module")
result = [x for x in result if x not in INVALID_FLAGS]
result.append(os.path.join(self.root, testcase.path + ".js"))
return testcase.flags + result
diff --git a/deps/v8/test/message/unterminated-arg-list.js b/deps/v8/test/message/unterminated-arg-list.js
new file mode 100644
index 0000000000..b0fd1dd893
--- /dev/null
+++ b/deps/v8/test/message/unterminated-arg-list.js
@@ -0,0 +1,7 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+$(document).ready(function() {
+$("html").load( "https://localhost" );
+}
diff --git a/deps/v8/test/message/unterminated-arg-list.out b/deps/v8/test/message/unterminated-arg-list.out
new file mode 100644
index 0000000000..5be2b3d90c
--- /dev/null
+++ b/deps/v8/test/message/unterminated-arg-list.out
@@ -0,0 +1,8 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: SyntaxError: missing ) after argument list
+}
+^
+SyntaxError: missing ) after argument list
diff --git a/deps/v8/test/mjsunit/asm/construct-double.js b/deps/v8/test/mjsunit/asm/construct-double.js
new file mode 100644
index 0000000000..8bb5000082
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/construct-double.js
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var stdlib = this;
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+
+var m = (function(stdlib, foreign, heap) {
+ "use asm";
+ function cd1(i, j) {
+ i = i|0;
+ j = j|0;
+ return +%_ConstructDouble(i, j);
+ }
+ function cd2(i) {
+ i = i|0;
+ return +%_ConstructDouble(0, i);
+ }
+ return { cd1: cd1, cd2: cd2 };
+})(stdlib, foreign, heap);
+
+assertEquals(0.0, m.cd1(0, 0));
+assertEquals(%ConstructDouble(0, 1), m.cd2(1));
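+// Sample the full int32 range with a large stride to keep the runtime bounded.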
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+ assertEquals(%ConstructDouble(0, i), m.cd2(i));
+ for (var j = -2147483648; j < 2147483648; j += 3999773) {
+ assertEquals(%ConstructDouble(i, j), m.cd1(i, j));
+ }
+}
diff --git a/deps/v8/test/mjsunit/asm/double-hi.js b/deps/v8/test/mjsunit/asm/double-hi.js
new file mode 100644
index 0000000000..5a5f942f7b
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/double-hi.js
@@ -0,0 +1,40 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var stdlib = this;
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+
+var m = (function(stdlib, foreign, heap) {
+ "use asm";
+ function hi1(i) {
+ i = +i;
+ return %_DoubleHi(i)|0;
+ }
+ function hi2(i, j) {
+ i = +i;
+ j = +j;
+ return %_DoubleHi(i)+%_DoubleHi(j)|0;
+ }
+ return { hi1: hi1, hi2: hi2 };
+})(stdlib, foreign, heap);
+
+assertEquals(0, m.hi1(0.0));
+assertEquals(-2147483648, m.hi1(-0.0));
+assertEquals(2146435072, m.hi1(Infinity));
+assertEquals(-1048576, m.hi1(-Infinity));
+assertEquals(0, m.hi2(0.0, 0.0));
+assertEquals(-2147483648, m.hi2(0.0, -0.0));
+assertEquals(-2147483648, m.hi2(-0.0, 0.0));
+assertEquals(0, m.hi2(-0.0, -0.0));
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+ assertEquals(%_DoubleHi(i), m.hi1(i));
+ assertEquals(i, m.hi1(%ConstructDouble(i, 0)));
+ assertEquals(i, m.hi1(%ConstructDouble(i, i)));
+ assertEquals(i+i|0, m.hi2(%ConstructDouble(i, 0), %ConstructDouble(i, 0)));
+ assertEquals(i+i|0, m.hi2(%ConstructDouble(i, i), %ConstructDouble(i, i)));
+}
diff --git a/deps/v8/test/mjsunit/asm/double-lo.js b/deps/v8/test/mjsunit/asm/double-lo.js
new file mode 100644
index 0000000000..39d5b5268f
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/double-lo.js
@@ -0,0 +1,40 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var stdlib = this;
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+
+var m = (function(stdlib, foreign, heap) {
+ "use asm";
+ function lo1(i) {
+ i = +i;
+ return %_DoubleLo(i)|0;
+ }
+ function lo2(i, j) {
+ i = +i;
+ j = +j;
+ return %_DoubleLo(i)+%_DoubleLo(j)|0;
+ }
+ return { lo1: lo1, lo2: lo2 };
+})(stdlib, foreign, heap);
+
+assertEquals(0, m.lo1(0.0));
+assertEquals(0, m.lo1(-0.0));
+assertEquals(0, m.lo1(Infinity));
+assertEquals(0, m.lo1(-Infinity));
+assertEquals(0, m.lo2(0.0, 0.0));
+assertEquals(0, m.lo2(0.0, -0.0));
+assertEquals(0, m.lo2(-0.0, 0.0));
+assertEquals(0, m.lo2(-0.0, -0.0));
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+ assertEquals(%_DoubleLo(i), m.lo1(i));
+ assertEquals(i, m.lo1(%ConstructDouble(0, i)));
+ assertEquals(i, m.lo1(%ConstructDouble(i, i)));
+ assertEquals(i+i|0, m.lo2(%ConstructDouble(0, i), %ConstructDouble(0, i)));
+ assertEquals(i+i|0, m.lo2(%ConstructDouble(i, i), %ConstructDouble(i, i)));
+}
diff --git a/deps/v8/test/mjsunit/asm/if-cloning.js b/deps/v8/test/mjsunit/asm/if-cloning.js
new file mode 100644
index 0000000000..99d4edc67a
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/if-cloning.js
@@ -0,0 +1,34 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var if0 = (function Module() {
+ "use asm";
+ function if0(i, j) {
+ i = i|0;
+ j = j|0;
+ if (i == 0 ? j == 0 : 0) return 1;
+ return 0;
+ }
+ return {if0: if0};
+})().if0;
+assertEquals(1, if0(0, 0));
+assertEquals(0, if0(11, 0));
+assertEquals(0, if0(0, -1));
+assertEquals(0, if0(-1024, 1));
+
+
+var if1 = (function Module() {
+ "use asm";
+ function if1(i, j) {
+ i = i|0;
+ j = j|0;
+ if (i == 0 ? j == 0 : 1) return 0;
+ return 1;
+ }
+ return {if1: if1};
+})().if1;
+assertEquals(0, if1(0, 0));
+assertEquals(0, if1(11, 0));
+assertEquals(1, if1(0, -1));
+assertEquals(0, if1(-1024, 9));
diff --git a/deps/v8/test/mjsunit/asm/math-clz32.js b/deps/v8/test/mjsunit/asm/math-clz32.js
new file mode 100644
index 0000000000..004aa65b46
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/math-clz32.js
@@ -0,0 +1,31 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var stdlib = { Math: Math };
+
+var f = (function Module(stdlib) {
+ "use asm";
+
+ var clz32 = stdlib.Math.clz32;
+
+ function f(a) {
+ a = a >>> 0;
+ return clz32(a)|0;
+ }
+
+ return f;
+})(stdlib);
+
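+// The a >>> 0 coercion maps NaN and undefined to 0, which has 32 leading
+// zeros.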
+assertEquals(32, f(0));
+assertEquals(32, f(NaN));
+assertEquals(32, f(undefined));
+for (var i = 0; i < 32; ++i) {
+ assertEquals(i, f((-1) >>> i));
+}
+for (var i = -2147483648; i < 2147483648; i += 3999773) {
+ assertEquals(%MathClz32(i), f(i));
+ assertEquals(%_MathClz32(i), f(i));
+}
diff --git a/deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js b/deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js
index 9ef8efbc0c..52c94e7548 100644
--- a/deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js
+++ b/deps/v8/test/mjsunit/bugs/harmony/debug-blockscopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-scoping
+// Flags: --expose-debug-as debug
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
diff --git a/deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js b/deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js
new file mode 100644
index 0000000000..c93ef9dfd5
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/deopt-tonumber-binop.js
@@ -0,0 +1,40 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+//
+var f = (function() {
+ "use asm";
+ function f(x, y) {
+ return x - y;
+ }
+ return f;
+})();
+
+var counter = 0;
+
+var deopt = { toString : function() {
+ %DeoptimizeFunction(f);
+ counter++;
+ return "2";
+} };
+
+var o = { toString : function() {
+ counter++;
+ return "1";
+} };
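+// Each subtraction converts both operands via toString exactly once, even
+// when a conversion deoptimizes f mid-evaluation; |counter| verifies this.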
+
+counter = 0;
+assertEquals(1, f(deopt, o));
+assertEquals(2, counter);
+
+%OptimizeFunctionOnNextCall(f);
+counter = 0;
+assertEquals(-1, f(o, deopt));
+assertEquals(2, counter);
+
+%OptimizeFunctionOnNextCall(f);
+counter = 0;
+assertEquals(0, f(deopt, deopt));
+assertEquals(2, counter);
diff --git a/deps/v8/test/mjsunit/compiler/eager-deopt-simple.js b/deps/v8/test/mjsunit/compiler/eager-deopt-simple.js
new file mode 100644
index 0000000000..067400cfc6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/eager-deopt-simple.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g(a, b, c) {
+ return a + b + c;
+}
+
+function f() {
+ return g(1, (%_DeoptimizeNow(), 2), 3);
+}
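+// The deopt fires while g's argument list is only partially evaluated; f must
+// resume in unoptimized code and still compute 1 + 2 + 3.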
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+assertEquals(6, f());
diff --git a/deps/v8/test/mjsunit/compiler/osr-forin-nested.js b/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
new file mode 100644
index 0000000000..ad55b30bd8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-forin-nested.js
@@ -0,0 +1,35 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-osr --allow-natives-syntax
+
+function test(e, f, v) {
+ assertEquals(e, f(v));
+ assertEquals(e, f(v));
+ assertEquals(e, f(v));
+}
+
+function foo(t) {
+ for (var x in t) {
+ for (var i = 0; i < 2; i++) {
+ %OptimizeOsr();
+ }
+ }
+ return 5;
+}
+
+test(5, foo, {x:20});
+
+function bar(t) {
+ var sum = 0;
+ for (var x in t) {
+ for (var i = 0; i < 2; i++) {
+ %OptimizeOsr();
+ sum += t[x];
+ }
+ }
+ return sum;
+}
+
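+// Each of the two properties is added twice: 2 * 20 + 2 * 11 = 62.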
+test(62, bar, {x:20,y:11});
diff --git a/deps/v8/test/mjsunit/compiler/osr-infinite.js b/deps/v8/test/mjsunit/compiler/osr-infinite.js
new file mode 100644
index 0000000000..aa74c877d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-infinite.js
@@ -0,0 +1,78 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --use-osr --allow-natives-syntax --turbo-osr
+
+var global_counter = 0;
+
+function thrower() {
+ var x = global_counter++;
+ if (x == 5) %OptimizeOsr(thrower.caller);
+ if (x == 10) throw "terminate";
+}
+
+%NeverOptimizeFunction(thrower); // Don't want to inline the thrower.
+%NeverOptimizeFunction(test); // Don't want to inline the func into test.
+
+function test(func) {
+ for (var i = 0; i < 3; i++) {
+ global_counter = 0;
+ assertThrows(func);
+ }
+}
+
+function n1() {
+ while (true) thrower();
+}
+
+function n2() {
+ while (true) while (true) thrower();
+}
+
+function n3() {
+ while (true) while (true) while (true) thrower();
+}
+
+function n4() {
+ while (true) while (true) while (true) while (true) thrower();
+}
+
+function b1(a) {
+ while (true) {
+ thrower();
+ if (a) break
+ }
+}
+
+
+function b2(a) {
+ while (true) {
+ while (true) {
+ thrower();
+ if (a) break
+ }
+ }
+}
+
+
+function b3(a) {
+ while (true) {
+ while (true) {
+ while (true) {
+ thrower();
+ if (a) break
+ }
+ if (a) break
+ }
+ }
+}
+
+
+test(n1);
+test(n2);
+test(n3);
+test(n4);
+test(b1);
+test(b2);
+test(b3);
diff --git a/deps/v8/test/mjsunit/compiler/osr-labeled.js b/deps/v8/test/mjsunit/compiler/osr-labeled.js
new file mode 100644
index 0000000000..1a9709285e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-labeled.js
@@ -0,0 +1,47 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function foo() {
+ var sum = 0;
+ A: for (var i = 0; i < 5; i++) {
+ B: for (var j = 0; j < 5; j++) {
+ C: for (var k = 0; k < 10; k++) {
+ if (k === 5) %OptimizeOsr();
+ if (k === 6) break B;
+ sum++;
+ }
+ }
+ }
+ return sum;
+}
+
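+// Per i-iteration, sum grows for k = 0..5 before break B exits the j loop,
+// so the expected result is 5 * 6 = 30.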
+assertEquals(30, foo());
+assertEquals(30, foo());
+
+function bar(a) {
+ var sum = 0;
+ A: for (var i = 0; i < 5; i++) {
+ B: for (var j = 0; j < 5; j++) {
+ C: for (var k = 0; k < 10; k++) {
+ sum++;
+ %OptimizeOsr();
+ if (a === 1) break A;
+ if (a === 2) break B;
+ if (a === 3) break C;
+ }
+ }
+ }
+ return sum;
+}
+
+assertEquals(1, bar(1));
+assertEquals(1, bar(1));
+
+assertEquals(5, bar(2));
+assertEquals(5, bar(2));
+
+assertEquals(25, bar(3));
+assertEquals(25, bar(3));
diff --git a/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js b/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
new file mode 100644
index 0000000000..950d8b0762
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-literals-adapted.js
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function mod() {
+ function f0() {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function f1(a) {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function f2(a,b) {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function f3(a,b,c) {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function f4(a,b,c,d) {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function bar() {
+ assertEquals(3, f0().blah);
+ assertEquals(3, f1().blah);
+ assertEquals(3, f2().blah);
+ assertEquals(3, f3().blah);
+ assertEquals(3, f4().blah);
+ }
+ bar();
+}
+
+
+mod();
+mod();
+mod();
diff --git a/deps/v8/test/mjsunit/compiler/osr-literals.js b/deps/v8/test/mjsunit/compiler/osr-literals.js
new file mode 100644
index 0000000000..d9f68a0b37
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/osr-literals.js
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --use-osr --turbo-osr
+
+function mod() {
+ function f0() {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function f1(a) {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function f2(a,b) {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function f3(a,b,c) {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function f4(a,b,c,d) {
+ for (var i = 0; i < 3; i = i + 1 | 0) {
+ %OptimizeOsr();
+ }
+ return {blah: i};
+ }
+
+ function bar() {
+ assertEquals(3, f0().blah);
+ assertEquals(3, f1(1).blah);
+ assertEquals(3, f2(1,2).blah);
+ assertEquals(3, f3(1,2,3).blah);
+ assertEquals(3, f4(1,2,3,4).blah);
+ }
+ bar();
+}
+
+
+mod();
+mod();
+mod();
diff --git a/deps/v8/test/mjsunit/compiler/regress-463056.js b/deps/v8/test/mjsunit/compiler/regress-463056.js
new file mode 100644
index 0000000000..fb871618e4
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-463056.js
@@ -0,0 +1,9 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
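+ // 0%0 is NaN; bitwise & and >>> coerce NaN to 0, so this folds to 0 + 1.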
+ return ((0%0)&1) + (1>>>(0%0));
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-468727.js b/deps/v8/test/mjsunit/compiler/regress-468727.js
new file mode 100644
index 0000000000..a69efe5a56
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-468727.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noanalyze-environment-liveness
+
+function f() {
+ var __v_7 = -126 - __v_3;
+ var __v_17 = ((__v_15 & __v_14) != 4) | 16;
+ if (__v_17) {
+ var __v_11 = 1 << __v_7;
+ }
+ __v_12 >>= __v_3;
+}
+
+assertThrows(f);
diff --git a/deps/v8/test/mjsunit/compiler/regress-469089.js b/deps/v8/test/mjsunit/compiler/regress-469089.js
new file mode 100644
index 0000000000..6aff2b7203
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-469089.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc
+
+(function() {
+ var __v_6 = false;
+ function f(val, idx) {
+ if (idx === 1) {
+ gc();
+ __v_6 = (val === 0);
+ }
+ }
+ f(.1, 1);
+})();
diff --git a/deps/v8/test/mjsunit/compiler/truncating-store-deopt.js b/deps/v8/test/mjsunit/compiler/truncating-store-deopt.js
new file mode 100644
index 0000000000..a640caf583
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/truncating-store-deopt.js
@@ -0,0 +1,28 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g(a, b, c) {
+ return a + b + c;
+}
+
+var asm = (function Module(global, env, buffer) {
+ "use asm";
+
+ var i32 = new global.Int32Array(buffer);
+
+ // This is not valid asm.js, but we should still generate correct code.
+ function store(x) {
+ return g(1, i32[0] = x, 2);
+ }
+
+ return { store: store };
+})({
+ "Int32Array": Int32Array
+}, {}, new ArrayBuffer(64 * 1024));
+
+var o = { toString : function() { %DeoptimizeFunction(asm.store); return "1"; } }
+
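+// The toString callback deoptimizes asm.store while its argument is being
+// converted; the test only checks that the truncating i32 store survives the
+// deopt without crashing.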
+asm.store(o);
diff --git a/deps/v8/test/mjsunit/compiler/try-deopt.js b/deps/v8/test/mjsunit/compiler/try-deopt.js
new file mode 100644
index 0000000000..dc44e7326f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/try-deopt.js
@@ -0,0 +1,56 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(mstarzinger): Add FLAG_turbo_exceptions once we want ClusterFuzz.
+// Flags: --allow-natives-syntax --turbo-deoptimization
+
+function DeoptFromTry(x) {
+ try {
+ %DeoptimizeFunction(DeoptFromTry);
+ throw x;
+ } catch (e) {
+ return e + 1;
+ }
+ return x + 2;
+}
+%OptimizeFunctionOnNextCall(DeoptFromTry);
+assertEquals(24, DeoptFromTry(23));
+
+
+function DeoptFromCatch(x) {
+ try {
+ throw x;
+ } catch (e) {
+ %DeoptimizeFunction(DeoptFromCatch);
+ return e + 1;
+ }
+ return x + 2;
+}
+%OptimizeFunctionOnNextCall(DeoptFromCatch);
+assertEquals(24, DeoptFromCatch(23));
+
+
+function DeoptFromFinally_Return(x) {
+ try {
+ throw x;
+ } finally {
+ %DeoptimizeFunction(DeoptFromFinally_Return);
+ return x + 1;
+ }
+ return x + 2;
+}
+%OptimizeFunctionOnNextCall(DeoptFromFinally_Return);
+assertEquals(24, DeoptFromFinally_Return(23));
+
+
+function DeoptFromFinally_ReThrow(x) {
+ try {
+ throw x;
+ } finally {
+ %DeoptimizeFunction(DeoptFromFinally_ReThrow);
+ }
+ return x + 2;
+}
+%OptimizeFunctionOnNextCall(DeoptFromFinally_ReThrow);
+assertThrows("DeoptFromFinally_ReThrow(new Error)", Error);
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index 73cf040f5a..3f82c2fa43 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --nodead-code-elimination --fold-constants --allow-natives-syntax
+// Flags: --nodead-code-elimination --fold-constants --allow-natives-syntax --nostress-opt
function test(f) {
f();
diff --git a/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js b/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
new file mode 100644
index 0000000000..f0613b2926
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-allscopes-on-debugger.js
@@ -0,0 +1,58 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+var exception = null;
+var break_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
+ // Count number of expected breakpoints in this source file.
+ if (!break_count) {
+ var source_text = exec_state.frame(0).func().script().source();
+ expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
+ print("Expected breaks: " + expected_breaks);
+ }
+ var frameMirror = exec_state.frame(0);
+
+ frameMirror.allScopes();
+ var source = frameMirror.sourceLineText();
+ print("paused at: " + source);
+ assertTrue(source.indexOf("// Break " + break_count + ".") > 0,
+ "Unexpected pause at: " + source + "\n" +
+ "Expected: // Break " + break_count + ".");
+ ++break_count;
+
+ if (break_count !== expected_breaks) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ print("Next step prepared");
+ }
+ }
+ } catch(e) {
+ exception = e;
+ print(e, e.stack);
+ }
+};
+
+Debug.setListener(listener);
+
+var sum = 0;
+(function (){
+ 'use strict';
+
+ debugger; // Break 0.
+ var i = 0; // Break 1.
+ i++; // Break 2.
+ i++; // Break 3.
+ return i; // Break 4.
+}()); // Break 5.
+
+assertNull(exception); // Break 6.
+assertEquals(expected_breaks, break_count);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/debug-liveedit-check-stack.js b/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
index 6948a70d6b..d843ca6a60 100644
--- a/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
+++ b/deps/v8/test/mjsunit/debug-liveedit-check-stack.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --allow-natives-syntax
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
@@ -87,13 +87,13 @@ function WrapInCatcher(f, holder) {
function WrapInNativeCall(f) {
return function() {
- return Debug.ExecuteInDebugContext(f, true);
+ return %Call(undefined, f);
};
}
function WrapInDebuggerCall(f) {
return function() {
- return Debug.ExecuteInDebugContext(f, false);
+ return %ExecuteInDebugContext(f);
};
}
diff --git a/deps/v8/test/mjsunit/debug-references.js b/deps/v8/test/mjsunit/debug-references.js
index bb339768b8..cb9f3701e2 100644
--- a/deps/v8/test/mjsunit/debug-references.js
+++ b/deps/v8/test/mjsunit/debug-references.js
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --expose-debug-as debug --turbo-deoptimization
+// Flags: --stack-trace-on-illegal
+
// Get the Debug object exposed from the debug context global object.
Debug = debug.Debug
@@ -98,7 +100,8 @@ function listener(event, exec_state, event_data, data) {
listenerComplete = true;
}
} catch (e) {
- exception = e
+ exception = e;
+ print(e + "\n" + e.stack);
};
};
diff --git a/deps/v8/test/mjsunit/debug-scopes.js b/deps/v8/test/mjsunit/debug-scopes.js
index 7c08120e2a..78a70af26a 100644
--- a/deps/v8/test/mjsunit/debug-scopes.js
+++ b/deps/v8/test/mjsunit/debug-scopes.js
@@ -1049,6 +1049,30 @@ catch_block_7();
EndTest();
+BeginTest("Classes and methods 1");
+
+listener_delegate = function(exec_state) {
+ "use strict"
+ CheckScopeChain([debug.ScopeType.Local,
+ debug.ScopeType.Block,
+ debug.ScopeType.Script,
+ debug.ScopeType.Global], exec_state);
+ CheckScopeContent({C1: class { m() { debugger; }} }, 1, exec_state);
+};
+
+(function() {
+ "use strict";
+ class C1 {
+ m() {
+ debugger;
+ }
+ }
+ new C1().m();
+})();
+
+EndTest();
+
+
assertEquals(begin_test_count, break_count,
'one or more tests did not enter the debugger');
assertEquals(begin_test_count, end_test_count,
diff --git a/deps/v8/test/mjsunit/debug-set-variable-value.js b/deps/v8/test/mjsunit/debug-set-variable-value.js
index 4667a71d6b..65434289d0 100644
--- a/deps/v8/test/mjsunit/debug-set-variable-value.js
+++ b/deps/v8/test/mjsunit/debug-set-variable-value.js
@@ -29,6 +29,7 @@
// Get the Debug object exposed from the debug context global object.
var Debug = debug.Debug;
+var DebugCommandProcessor = debug.DebugCommandProcessor;
// Accepts a function/closure 'fun' that must have a debugger statement inside.
// A variable 'variable_name' must be initialized before debugger statement
@@ -291,18 +292,18 @@ RunPauseTest(0, 5, 'p', 2012, 2012, (function Factory() {
// Test value description protocol JSON
-assertEquals(true, Debug.TestApi.CommandProcessorResolveValue({value: true}));
+assertEquals(true, DebugCommandProcessor.resolveValue_({value: true}));
-assertSame(null, Debug.TestApi.CommandProcessorResolveValue({type: "null"}));
+assertSame(null, DebugCommandProcessor.resolveValue_({type: "null"}));
assertSame(undefined,
- Debug.TestApi.CommandProcessorResolveValue({type: "undefined"}));
+ DebugCommandProcessor.resolveValue_({type: "undefined"}));
-assertSame("123", Debug.TestApi.CommandProcessorResolveValue(
+assertSame("123", DebugCommandProcessor.resolveValue_(
{type: "string", stringDescription: "123"}));
-assertSame(123, Debug.TestApi.CommandProcessorResolveValue(
+assertSame(123, DebugCommandProcessor.resolveValue_(
{type: "number", stringDescription: "123"}));
-assertSame(Number, Debug.TestApi.CommandProcessorResolveValue(
+assertSame(Number, DebugCommandProcessor.resolveValue_(
{handle: Debug.MakeMirror(Number).handle()}));
-assertSame(RunClosureTest, Debug.TestApi.CommandProcessorResolveValue(
+assertSame(RunClosureTest, DebugCommandProcessor.resolveValue_(
{handle: Debug.MakeMirror(RunClosureTest).handle()}));
diff --git a/deps/v8/test/mjsunit/debug-sourceinfo.js b/deps/v8/test/mjsunit/debug-sourceinfo.js
index ddf80dc51c..1dbe1b7a0a 100644
--- a/deps/v8/test/mjsunit/debug-sourceinfo.js
+++ b/deps/v8/test/mjsunit/debug-sourceinfo.js
@@ -1,352 +1,266 @@
-// Copyright 2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// For this test to work this file MUST have CR LF line endings.
-function a() { b(); };
-function b() {
- c(true);
-};
- function c(x) {
- if (x) {
- return 1;
- } else {
- return 1;
- }
- };
-function d(x) {
- x = 1 ;
- x = 2 ;
- x = 3 ;
- x = 4 ;
- x = 5 ;
- x = 6 ;
- x = 7 ;
- x = 8 ;
- x = 9 ;
- x = 10;
- x = 11;
- x = 12;
- x = 13;
- x = 14;
- x = 15;
-}
-
-// Get the Debug object exposed from the debug context global object.
-Debug = debug.Debug
-
-// This is the number of comment lines above the first test function.
-var comment_lines = 29;
-
-// This is the last position in the entire file (note: this equals
-// file size of <debug-sourceinfo.js> - 1, since starting at 0).
-var last_position = 14312;
-// This is the last line of entire file (note: starting at 0).
-var last_line = 351;
-// This is the last column of last line (note: starting at 0 and +2, due
-// to trailing <CR><LF>).
-var last_column = 2;
-
-// This magic number is the length or the first line comment (actually number
-// of characters before 'function a(...'.
-var comment_line_length = 1726;
-var start_a = 10 + comment_line_length;
-var start_b = 37 + comment_line_length;
-var start_c = 71 + comment_line_length;
-var start_d = 163 + comment_line_length;
-
-// The position of the first line of d(), i.e. "x = 1 ;".
-var start_code_d = start_d + 7;
-// The line # of the first line of d() (note: starting at 0).
-var start_line_d = 41;
-var line_length_d = 11;
-var num_lines_d = 15;
-
-assertEquals(start_a, Debug.sourcePosition(a));
-assertEquals(start_b, Debug.sourcePosition(b));
-assertEquals(start_c, Debug.sourcePosition(c));
-assertEquals(start_d, Debug.sourcePosition(d));
-
-var script = Debug.findScript(a);
-assertTrue(script.data === Debug.findScript(b).data);
-assertTrue(script.data === Debug.findScript(c).data);
-assertTrue(script.data === Debug.findScript(d).data);
-assertTrue(script.source === Debug.findScript(b).source);
-assertTrue(script.source === Debug.findScript(c).source);
-assertTrue(script.source === Debug.findScript(d).source);
-
-// Test that when running through source positions the position, line and
-// column progresses as expected.
-var position;
-var line;
-var column;
-for (var p = 0; p < 100; p++) {
- var location = script.locationFromPosition(p);
- if (p > 0) {
- assertEquals(position + 1, location.position);
- if (line == location.line) {
- assertEquals(column + 1, location.column);
- } else {
- assertEquals(line + 1, location.line);
- assertEquals(0, location.column);
- }
- } else {
- assertEquals(0, location.position);
- assertEquals(0, location.line);
- assertEquals(0, location.column);
- }
-
- // Remember the location.
- position = location.position;
- line = location.line;
- column = location.column;
-}
-
-// Every line of d() is the same length. Verify we can loop through all
-// positions and find the right line # for each.
-var p = start_code_d;
-for (line = 0; line < num_lines_d; line++) {
- for (column = 0; column < line_length_d; column++) {
- var location = script.locationFromPosition(p);
- assertEquals(p, location.position);
- assertEquals(start_line_d + line, location.line);
- assertEquals(column, location.column);
- p++;
- }
-}
-
-// Test first position.
-assertEquals(0, script.locationFromPosition(0).position);
-assertEquals(0, script.locationFromPosition(0).line);
-assertEquals(0, script.locationFromPosition(0).column);
-
-// Test second position.
-assertEquals(1, script.locationFromPosition(1).position);
-assertEquals(0, script.locationFromPosition(1).line);
-assertEquals(1, script.locationFromPosition(1).column);
-
-// Test first position in function a().
-assertEquals(start_a, script.locationFromPosition(start_a).position);
-assertEquals(0, script.locationFromPosition(start_a).line - comment_lines);
-assertEquals(10, script.locationFromPosition(start_a).column);
-
-// Test first position in function b().
-assertEquals(start_b, script.locationFromPosition(start_b).position);
-assertEquals(1, script.locationFromPosition(start_b).line - comment_lines);
-assertEquals(13, script.locationFromPosition(start_b).column);
-
-// Test first position in function c().
-assertEquals(start_c, script.locationFromPosition(start_c).position);
-assertEquals(4, script.locationFromPosition(start_c).line - comment_lines);
-assertEquals(12, script.locationFromPosition(start_c).column);
-
-// Test first position in function d().
-assertEquals(start_d, script.locationFromPosition(start_d).position);
-assertEquals(11, script.locationFromPosition(start_d).line - comment_lines);
-assertEquals(10, script.locationFromPosition(start_d).column);
-
-// Test first line.
-assertEquals(0, script.locationFromLine().position);
-assertEquals(0, script.locationFromLine().line);
-assertEquals(0, script.locationFromLine().column);
-assertEquals(0, script.locationFromLine(0).position);
-assertEquals(0, script.locationFromLine(0).line);
-assertEquals(0, script.locationFromLine(0).column);
-
-// Test first line column 1.
-assertEquals(1, script.locationFromLine(0, 1).position);
-assertEquals(0, script.locationFromLine(0, 1).line);
-assertEquals(1, script.locationFromLine(0, 1).column);
-
-// Test first line offset 1.
-assertEquals(1, script.locationFromLine(0, 0, 1).position);
-assertEquals(0, script.locationFromLine(0, 0, 1).line);
-assertEquals(1, script.locationFromLine(0, 0, 1).column);
-
-// Test offset function a().
-assertEquals(start_a, script.locationFromLine(void 0, void 0, start_a).position);
-assertEquals(0, script.locationFromLine(void 0, void 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(void 0, void 0, start_a).column);
-assertEquals(start_a, script.locationFromLine(0, void 0, start_a).position);
-assertEquals(0, script.locationFromLine(0, void 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(0, void 0, start_a).column);
-assertEquals(start_a, script.locationFromLine(0, 0, start_a).position);
-assertEquals(0, script.locationFromLine(0, 0, start_a).line - comment_lines);
-assertEquals(10, script.locationFromLine(0, 0, start_a).column);
-
-// Test second line offset function a().
-assertEquals(start_a + 14, script.locationFromLine(1, 0, start_a).position);
-assertEquals(1, script.locationFromLine(1, 0, start_a).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 0, start_a).column);
-
-// Test second line column 2 offset function a().
-assertEquals(start_a + 14 + 2, script.locationFromLine(1, 2, start_a).position);
-assertEquals(1, script.locationFromLine(1, 2, start_a).line - comment_lines);
-assertEquals(2, script.locationFromLine(1, 2, start_a).column);
-
-// Test offset function b().
-assertEquals(start_b, script.locationFromLine(0, 0, start_b).position);
-assertEquals(1, script.locationFromLine(0, 0, start_b).line - comment_lines);
-assertEquals(13, script.locationFromLine(0, 0, start_b).column);
-
-// Test second line offset function b().
-assertEquals(start_b + 6, script.locationFromLine(1, 0, start_b).position);
-assertEquals(2, script.locationFromLine(1, 0, start_b).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 0, start_b).column);
-
-// Test second line column 11 offset function b().
-assertEquals(start_b + 6 + 11, script.locationFromLine(1, 11, start_b).position);
-assertEquals(2, script.locationFromLine(1, 11, start_b).line - comment_lines);
-assertEquals(11, script.locationFromLine(1, 11, start_b).column);
-
-// Test second line column 12 offset function b(). The second line in b is
-// 11 characters long, so using column 12 wraps to the next line.
-assertEquals(start_b + 6 + 12, script.locationFromLine(1, 12, start_b).position);
-assertEquals(3, script.locationFromLine(1, 12, start_b).line - comment_lines);
-assertEquals(0, script.locationFromLine(1, 12, start_b).column);
-
-// Test the Debug.findSourcePosition which wraps SourceManager.
-assertEquals(0 + start_a, Debug.findFunctionSourceLocation(a, 0, 0).position);
-assertEquals(0 + start_b, Debug.findFunctionSourceLocation(b, 0, 0).position);
-assertEquals(6 + start_b, Debug.findFunctionSourceLocation(b, 1, 0).position);
-assertEquals(8 + start_b, Debug.findFunctionSourceLocation(b, 1, 2).position);
-assertEquals(18 + start_b, Debug.findFunctionSourceLocation(b, 2, 0).position);
-assertEquals(0 + start_c, Debug.findFunctionSourceLocation(c, 0, 0).position);
-assertEquals(7 + start_c, Debug.findFunctionSourceLocation(c, 1, 0).position);
-assertEquals(21 + start_c, Debug.findFunctionSourceLocation(c, 2, 0).position);
-assertEquals(38 + start_c, Debug.findFunctionSourceLocation(c, 3, 0).position);
-assertEquals(52 + start_c, Debug.findFunctionSourceLocation(c, 4, 0).position);
-assertEquals(69 + start_c, Debug.findFunctionSourceLocation(c, 5, 0).position);
-assertEquals(76 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);
-assertEquals(0 + start_d, Debug.findFunctionSourceLocation(d, 0, 0).position);
-assertEquals(7 + start_d, Debug.findFunctionSourceLocation(d, 1, 0).position);
-for (i = 1; i <= num_lines_d; i++) {
- assertEquals(7 + (i * line_length_d) + start_d, Debug.findFunctionSourceLocation(d, (i + 1), 0).position);
-}
-assertEquals(175 + start_d, Debug.findFunctionSourceLocation(d, 17, 0).position);
-
-// Make sure invalid inputs work properly.
-assertEquals(0, script.locationFromPosition(-1).line);
-assertEquals(null, script.locationFromPosition(last_position + 1));
-
-// Test last position.
-assertEquals(last_position, script.locationFromPosition(last_position).position);
-assertEquals(last_line, script.locationFromPosition(last_position).line);
-assertEquals(last_column, script.locationFromPosition(last_position).column);
-
-// Test source line and restriction. All the following tests start from line 1
-// column 2 in function b, which is the call to c.
-// c(true);
-// ^
-
-var location;
-
-location = script.locationFromLine(1, 0, start_b);
-assertEquals(' c(true);', location.sourceText());
-
-result = ['c', ' c', ' c(', ' c(', ' c(t']
-for (var i = 1; i <= 5; i++) {
- location = script.locationFromLine(1, 2, start_b);
- location.restrict(i);
- assertEquals(result[i - 1], location.sourceText());
-}
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(1, 0);
-assertEquals('c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 0);
-assertEquals('c(', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 1);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 2);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(2, 3);
-assertEquals(' c', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(3, 1);
-assertEquals(' c(', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 0);
-assertEquals('c(tru', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 2);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 2, start_b);
-location.restrict(5, 4);
-assertEquals(' c(t', location.sourceText());
-
-// All the following tests start from line 1 column 10 in function b, which is
-// the final character.
-// c(true);
-// ^
-
-location = script.locationFromLine(1, 10, start_b);
-location.restrict(5, 0);
-assertEquals('rue);', location.sourceText());
-
-location = script.locationFromLine(1, 10, start_b);
-location.restrict(7, 0);
-assertEquals('(true);', location.sourceText());
-
-// All the following tests start from line 1 column 0 in function b, which is
-// the first character.
-// c(true);
-//^
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(5, 0);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(5, 4);
-assertEquals(' c(t', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(7, 0);
-assertEquals(' c(tru', location.sourceText());
-
-location = script.locationFromLine(1, 0, start_b);
-location.restrict(7, 6);
-assertEquals(' c(tru', location.sourceText());
-
-// Test that script.sourceLine(line) works.
-for (line = 0; line < num_lines_d; line++) {
- var line_content_regexp = new RegExp(" x = " + (line + 1));
- assertTrue(line_content_regexp.test(script.sourceLine(start_line_d + line)));
-}
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-debug-as debug
+function a() { b(); };
+function b() {
+ c(true);
+};
+ function c(x) {
+ if (x) {
+ return 1;
+ } else {
+ return 1;
+ }
+ };
+function d(x) {
+ x = 1 ;
+ x = 2 ;
+ x = 3 ;
+ x = 4 ;
+ x = 5 ;
+ x = 6 ;
+ x = 7 ;
+ x = 8 ;
+ x = 9 ;
+ x = 10;
+ x = 11;
+ x = 12;
+ x = 13;
+ x = 14;
+ x = 15;
+}
+
+// Get the Debug object exposed from the debug context global object.
+Debug = debug.Debug
+
+// This is the number of comment lines above the first test function.
+var comment_lines = 28;
+
+// This is the last position in the entire file (note: this equals the
+// file size of <debug-sourceinfo.js> - 1, since positions start at 0).
+var last_position = 11337;
+// This is the last line of the entire file (note: lines start at 0).
+var last_line = 265;
+// This is the last column of the last line (note: columns start at 0; the
+// value is 1 rather than 0 because of the trailing <LF>).
+var last_column = 1;
+
+// This magic number is the length of the first line comment (actually the
+// number of characters before 'function a(...').
+var comment_line_length = 1633;
+var start_a = 9 + comment_line_length;
+var start_b = 35 + comment_line_length;
+var start_c = 66 + comment_line_length;
+var start_d = 151 + comment_line_length;
+
+// The position of the first line of d(), i.e. "x = 1 ;".
+var start_code_d = start_d + 6;
+// The line # of the first line of d() (note: starting at 0).
+var start_line_d = 40;
+var line_length_d = 10;
+var num_lines_d = 15;
+
+assertEquals(start_a, Debug.sourcePosition(a));
+assertEquals(start_b, Debug.sourcePosition(b));
+assertEquals(start_c, Debug.sourcePosition(c));
+assertEquals(start_d, Debug.sourcePosition(d));
+
+var script = Debug.findScript(a);
+assertTrue(script.data === Debug.findScript(b).data);
+assertTrue(script.data === Debug.findScript(c).data);
+assertTrue(script.data === Debug.findScript(d).data);
+assertTrue(script.source === Debug.findScript(b).source);
+assertTrue(script.source === Debug.findScript(c).source);
+assertTrue(script.source === Debug.findScript(d).source);
+
+// Test that when running through source positions the position, line and
+// column progress as expected.
+var position;
+var line;
+var column;
+for (var p = 0; p < 100; p++) {
+ var location = script.locationFromPosition(p);
+ if (p > 0) {
+ assertEquals(position + 1, location.position);
+ if (line == location.line) {
+ assertEquals(column + 1, location.column);
+ } else {
+ assertEquals(line + 1, location.line);
+ assertEquals(0, location.column);
+ }
+ } else {
+ assertEquals(0, location.position);
+ assertEquals(0, location.line);
+ assertEquals(0, location.column);
+ }
+
+ // Remember the location.
+ position = location.position;
+ line = location.line;
+ column = location.column;
+}
+
+// Every line of d() is the same length. Verify we can loop through all
+// positions and find the right line # for each.
+var p = start_code_d;
+for (line = 0; line < num_lines_d; line++) {
+ for (column = 0; column < line_length_d; column++) {
+ var location = script.locationFromPosition(p);
+ assertEquals(p, location.position);
+ assertEquals(start_line_d + line, location.line);
+ assertEquals(column, location.column);
+ p++;
+ }
+}
+
+// Test first position.
+assertEquals(0, script.locationFromPosition(0).position);
+assertEquals(0, script.locationFromPosition(0).line);
+assertEquals(0, script.locationFromPosition(0).column);
+
+// Test second position.
+assertEquals(1, script.locationFromPosition(1).position);
+assertEquals(0, script.locationFromPosition(1).line);
+assertEquals(1, script.locationFromPosition(1).column);
+
+// Test first position in function a().
+assertEquals(start_a, script.locationFromPosition(start_a).position);
+assertEquals(0, script.locationFromPosition(start_a).line - comment_lines);
+assertEquals(10, script.locationFromPosition(start_a).column);
+
+// Test first position in function b().
+assertEquals(start_b, script.locationFromPosition(start_b).position);
+assertEquals(1, script.locationFromPosition(start_b).line - comment_lines);
+assertEquals(13, script.locationFromPosition(start_b).column);
+
+// Test first position in function c().
+assertEquals(start_c, script.locationFromPosition(start_c).position);
+assertEquals(4, script.locationFromPosition(start_c).line - comment_lines);
+assertEquals(12, script.locationFromPosition(start_c).column);
+
+// Test first position in function d().
+assertEquals(start_d, script.locationFromPosition(start_d).position);
+assertEquals(11, script.locationFromPosition(start_d).line - comment_lines);
+assertEquals(10, script.locationFromPosition(start_d).column);
+
+// Test first line.
+assertEquals(0, script.locationFromLine().position);
+assertEquals(0, script.locationFromLine().line);
+assertEquals(0, script.locationFromLine().column);
+assertEquals(0, script.locationFromLine(0).position);
+assertEquals(0, script.locationFromLine(0).line);
+assertEquals(0, script.locationFromLine(0).column);
+
+// Test first line column 1.
+assertEquals(1, script.locationFromLine(0, 1).position);
+assertEquals(0, script.locationFromLine(0, 1).line);
+assertEquals(1, script.locationFromLine(0, 1).column);
+
+// Test first line offset 1.
+assertEquals(1, script.locationFromLine(0, 0, 1).position);
+assertEquals(0, script.locationFromLine(0, 0, 1).line);
+assertEquals(1, script.locationFromLine(0, 0, 1).column);
+
+// Test offset function a().
+assertEquals(start_a, script.locationFromLine(void 0, void 0, start_a).position);
+assertEquals(0, script.locationFromLine(void 0, void 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(void 0, void 0, start_a).column);
+assertEquals(start_a, script.locationFromLine(0, void 0, start_a).position);
+assertEquals(0, script.locationFromLine(0, void 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(0, void 0, start_a).column);
+assertEquals(start_a, script.locationFromLine(0, 0, start_a).position);
+assertEquals(0, script.locationFromLine(0, 0, start_a).line - comment_lines);
+assertEquals(10, script.locationFromLine(0, 0, start_a).column);
+
+// Test second line offset function a().
+assertEquals(start_a + 13, script.locationFromLine(1, 0, start_a).position);
+assertEquals(1, script.locationFromLine(1, 0, start_a).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 0, start_a).column);
+
+// Test second line columns 1 and 2 offset function a().
+assertEquals(start_a + 13 + 1, script.locationFromLine(1, 1, start_a).position);
+assertEquals(1, script.locationFromLine(1, 2, start_a).line - comment_lines);
+assertEquals(2, script.locationFromLine(1, 2, start_a).column);
+
+// Test offset function b().
+assertEquals(start_b, script.locationFromLine(0, 0, start_b).position);
+assertEquals(1, script.locationFromLine(0, 0, start_b).line - comment_lines);
+assertEquals(13, script.locationFromLine(0, 0, start_b).column);
+
+// Test second line offset function b().
+assertEquals(start_b + 5, script.locationFromLine(1, 0, start_b).position);
+assertEquals(2, script.locationFromLine(1, 0, start_b).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 0, start_b).column);
+
+// Test second line column 10 offset function b().
+assertEquals(start_b + 5 + 10, script.locationFromLine(1, 10, start_b).position);
+assertEquals(2, script.locationFromLine(1, 10, start_b).line - comment_lines);
+assertEquals(10, script.locationFromLine(1, 10, start_b).column);
+
+// Test second line column 11 offset function b(). The second line in b is
+// 10 characters long, so using column 11 wraps to the next line.
+assertEquals(start_b + 5 + 11, script.locationFromLine(1, 11, start_b).position);
+assertEquals(3, script.locationFromLine(1, 11, start_b).line - comment_lines);
+assertEquals(0, script.locationFromLine(1, 11, start_b).column);
+
+// Test the Debug.findSourcePosition which wraps SourceManager.
+assertEquals(0 + start_a, Debug.findFunctionSourceLocation(a, 0, 0).position);
+assertEquals(0 + start_b, Debug.findFunctionSourceLocation(b, 0, 0).position);
+assertEquals(5 + start_b, Debug.findFunctionSourceLocation(b, 1, 0).position);
+assertEquals(7 + start_b, Debug.findFunctionSourceLocation(b, 1, 2).position);
+assertEquals(16 + start_b, Debug.findFunctionSourceLocation(b, 2, 0).position);
+assertEquals(0 + start_c, Debug.findFunctionSourceLocation(c, 0, 0).position);
+assertEquals(6 + start_c, Debug.findFunctionSourceLocation(c, 1, 0).position);
+assertEquals(19 + start_c, Debug.findFunctionSourceLocation(c, 2, 0).position);
+assertEquals(35 + start_c, Debug.findFunctionSourceLocation(c, 3, 0).position);
+assertEquals(48 + start_c, Debug.findFunctionSourceLocation(c, 4, 0).position);
+assertEquals(64 + start_c, Debug.findFunctionSourceLocation(c, 5, 0).position);
+assertEquals(70 + start_c, Debug.findFunctionSourceLocation(c, 6, 0).position);
+assertEquals(0 + start_d, Debug.findFunctionSourceLocation(d, 0, 0).position);
+assertEquals(6 + start_d, Debug.findFunctionSourceLocation(d, 1, 0).position);
+for (i = 1; i <= num_lines_d; i++) {
+ assertEquals(6 + (i * line_length_d) + start_d, Debug.findFunctionSourceLocation(d, (i + 1), 0).position);
+}
+assertEquals(158 + start_d, Debug.findFunctionSourceLocation(d, 17, 0).position);
+
+// Make sure invalid inputs work properly.
+assertEquals(0, script.locationFromPosition(-1).line);
+assertEquals(null, script.locationFromPosition(last_position + 1));
+
+// Test last position.
+assertEquals(last_position, script.locationFromPosition(last_position).position);
+assertEquals(last_line, script.locationFromPosition(last_position).line);
+assertEquals(last_column, script.locationFromPosition(last_position).column);
+
+// Test that script.sourceLine(line) works.
+var location;
+
+for (line = 0; line < num_lines_d; line++) {
+ var line_content_regexp = new RegExp(" x = " + (line + 1));
+ assertTrue(line_content_regexp.test(script.sourceLine(start_line_d + line)));
+}
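
The assertions above pin down the contract of script.locationFromPosition: a
0-based character position maps to the 0-based line containing it and the
column within that line, with a newline counted as the final column of its
line. A minimal sketch of that mapping (not V8's implementation), assuming
only an array of line-terminator offsets:

    function locationFromPosition(lineEnds, position) {
      // lineEnds[i] is the offset of the '\n' that terminates line i.
      var line = 0;
      while (line < lineEnds.length && position > lineEnds[line]) line++;
      var lineStart = (line === 0) ? 0 : lineEnds[line - 1] + 1;
      return { position: position, line: line, column: position - lineStart };
    }

    // "ab\ncd\n": lineEnds are [2, 5]; position 3 is line 1, column 0,
    // and position 2 (the first '\n') is line 0, column 2.
    locationFromPosition([2, 5], 3);  // { position: 3, line: 1, column: 0 }
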
diff --git a/deps/v8/test/mjsunit/debug-step-turbofan.js b/deps/v8/test/mjsunit/debug-step-turbofan.js
index c8c346b2c7..1710942e9a 100644
--- a/deps/v8/test/mjsunit/debug-step-turbofan.js
+++ b/deps/v8/test/mjsunit/debug-step-turbofan.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbo-filter=g --allow-natives-syntax
+// Flags: --expose-debug-as debug --turbo-filter=g --allow-natives-syntax
// Test that Debug::PrepareForBreakPoints can deal with turbofan code (g)
// on the stack. Without deoptimization support, we will not be able to
diff --git a/deps/v8/test/mjsunit/debug-stepframe-clearing.js b/deps/v8/test/mjsunit/debug-stepframe-clearing.js
new file mode 100644
index 0000000000..c440e78dd2
--- /dev/null
+++ b/deps/v8/test/mjsunit/debug-stepframe-clearing.js
@@ -0,0 +1,97 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// This test ensures that IC learning doesn't interfere with stepping into
+// a property accessor. f1()'s ICs are allowed to learn to a monomorphic
+// state and the breakpoints flooding get() are allowed to expire; then we
+// ensure that we can step into get() again later (when k == 1).
+function f1() {
+ for (var k = 0; k < 2; k++) { // Break 1
+ var v10 = 0; // Line 2
+ for (var i = 0; i < 10; i++) { // Line 3
+ var v12 = o.slappy; // Line 4
+ var v13 = 3 // Line 5
+ } // Line 6
+ print("break here"); // Break 3
+ } // Line 8
+ print("exiting f1"); // Line 9 (dummy break)
+}
+
+function get() {
+ var g0 = 0; // Break 2
+ var g1 = 1;
+ return 3;
+}
+
+
+var o = {};
+Object.defineProperty(o, "slappy", { get : get });
+
+Debug = debug.Debug;
+var break_count = 0;
+var exception = null;
+var bp_f1_line7;
+var bp_f1_line9;
+
+function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ var line = exec_state.frame(0).sourceLineText();
+ print(line);
+ var match = line.match(/\/\/ Break (\d+)$/);
+ assertEquals(2, match.length);
+ var match_value = parseInt(match[1]);
+
+ if (break_count >= 0 && break_count < 2) {
+ // 0, 1: Keep stepping through frames.
+ assertEquals(break_count, match_value);
+ exec_state.prepareStep(Debug.StepAction.StepFrame, 1);
+ } else if (break_count === 2) {
+ // 2: let the code run to a breakpoint we set. The load should
+ // go monomorphic.
+ assertEquals(break_count, match_value);
+ } else if (break_count === 3) {
+ // 3: back to frame stepping. Does the monomorphic slappy accessor
+ // call still have the ability to break like before?
+ assertEquals(break_count, match_value);
+ Debug.clearBreakPoint(bp_f1_line7);
+ exec_state.prepareStep(Debug.StepAction.StepFrame, 1);
+ } else {
+ assertEquals(4, break_count);
+ assertEquals(2, match_value);
+ // Apparently we can still stop in the accessor even though we cleared
+ // breakpoints earlier and there was a monomorphic step.
+ // Allow running to completion now.
+ Debug.clearBreakPoint(bp_f1_line9);
+ }
+
+ break_count++;
+ } catch (e) {
+ print(e + e.stack);
+ exception = e;
+ }
+}
+
+for (var j = 1; j < 3; j++) {
+ break_count = 0;
+ Debug.setListener(listener);
+
+ // Breakpoints are added here rather than in the listener because their
+ // addition causes a full (clearing) gc that clears type feedback when we
+ // want to let it build up. Also, bp_f1_line9 is set because if we handled
+ // and then deleted bp_f1_line7 alone, the debugger would clear f1's
+ // DebugInfo while we are still using it, again resetting type feedback,
+ // which is undesirable.
+ bp_f1_line7 = Debug.setBreakPoint(f1, 7);
+ bp_f1_line9 = Debug.setBreakPoint(f1, 9);
+
+ debugger; // Break 0
+ f1();
+ Debug.setListener(null);
+ assertTrue(break_count === 5);
+}
+
+assertNull(exception);
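
A note on how the two breakpoints above are addressed: judging from the
test's own "// Line N" comments, the line argument of Debug.setBreakPoint is
0-based and relative to the function's first line, not to the script. Sketch
of the mapping this test assumes:

    // function f1() {            // line 0
    //   for (...) {              // line 1  (Break 1)
    //     ...
    //     print("break here");   // line 7  (Break 3)
    //   }                        // line 8
    //   print("exiting f1");     // line 9  (dummy break)
    // }
    bp_f1_line7 = Debug.setBreakPoint(f1, 7);  // stops on "break here"
    bp_f1_line9 = Debug.setBreakPoint(f1, 9);  // stops on "exiting f1"
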
diff --git a/deps/v8/test/mjsunit/debug-stepin-foreach.js b/deps/v8/test/mjsunit/debug-stepin-foreach.js
index fa728e019c..c2702f794a 100644
--- a/deps/v8/test/mjsunit/debug-stepin-foreach.js
+++ b/deps/v8/test/mjsunit/debug-stepin-foreach.js
@@ -37,15 +37,17 @@ function listener(event, exec_state, event_data, data) {
};
Debug.setListener(listener);
+var bound_callback = callback.bind(null);
debugger; // Break 0.
[1,2].forEach(callback); // Break 1.
+[3,4].forEach(bound_callback); // Break 6.
function callback(x) {
- return x; // Break 2. // Break 4.
-} // Break 3. // Break 5.
+ return x; // Break 2. // Break 4. // Break 7. // Break 9.
+} // Break 3. // Break 5. // Break 8. // Break 10.
-assertNull(exception); // Break 6.
+assertNull(exception); // Break 11.
assertEquals(expected_breaks, break_count);
Debug.setListener(null);
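
The added Break 6-10 expectations exercise stepping into a bound function: a
function created by Function.prototype.bind has no source positions of its
own, so stepping into it should land in the bound target's body, exactly as
for the direct call. A minimal sketch of the relationship under test:

    function callback(x) {
      return x;                    // stepping into either call stops here
    }
    var bound_callback = callback.bind(null);

    callback(1);                   // direct call: breaks in callback
    bound_callback(3);             // bound call: same body, same breaks
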
diff --git a/deps/v8/test/mjsunit/harmony/block-conflicts.js b/deps/v8/test/mjsunit/es6/block-conflicts.js
index d19a34a2c3..fdd581dd70 100644
--- a/deps/v8/test/mjsunit/harmony/block-conflicts.js
+++ b/deps/v8/test/mjsunit/es6/block-conflicts.js
@@ -1,8 +1,6 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-scoping
// Test for conflicting variable bindings.
diff --git a/deps/v8/test/mjsunit/es6/block-const-assign.js b/deps/v8/test/mjsunit/es6/block-const-assign.js
new file mode 100644
index 0000000000..f78faa689d
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/block-const-assign.js
@@ -0,0 +1,160 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-computed-property-names
+
+// Test that we throw errors in harmony mode when using an immutable
+// binding in an assignment or with prefix/postfix decrement/increment
+// operators.
+
+"use strict";
+
+const decls = [
+ // Const declaration.
+ function(use) { return "const c = 1; " + use + ";" }, TypeError,
+ function(use) { return "const x = 0, c = 1; " + use + ";" }, TypeError,
+ function(use) { return "const c = 1, x = (" + use + ");" }, TypeError,
+ function(use) { return use + "; const c = 1;" }, ReferenceError,
+ function(use) { return use + "; const x = 0, c = 1;" }, ReferenceError,
+ function(use) { return "const x = (" + use + "), c = 1;" }, ReferenceError,
+ function(use) { return "const c = (" + use + ");" }, ReferenceError,
+
+ // Function expression.
+ function(use) { return "(function c() { " + use + "; })();"; }, TypeError,
+ // TODO(rossberg): Once we have default parameters, test using 'c' there.
+
+ // Class expression.
+ function(use) {
+ return "new class c { constructor() { " + use + " } };";
+ }, TypeError,
+ function(use) {
+ return "(new class c { m() { " + use + " } }).m();";
+ }, TypeError,
+ function(use) {
+ return "(new class c { get a() { " + use + " } }).a;";
+ }, TypeError,
+ function(use) {
+ return "(new class c { set a(x) { " + use + " } }).a = 0;";
+ }, TypeError,
+ function(use) {
+ return "(class c { static m() { " + use + " } }).s();";
+ }, TypeError,
+ function(use) {
+ return "(class c extends (" + use + ") {});";
+ }, ReferenceError,
+ function(use) {
+ return "(class c { [" + use + "]() {} });";
+ }, ReferenceError,
+ function(use) {
+ return "(class c { get [" + use + "]() {} });";
+ }, ReferenceError,
+ function(use) {
+ return "(class c { set [" + use + "](x) {} });";
+ }, ReferenceError,
+ function(use) {
+ return "(class c { static [" + use + "]() {} });";
+ }, ReferenceError,
+
+ // For loop.
+ function(use) {
+ return "for (const c = 0; " + use + ";) {}"
+ }, TypeError,
+ function(use) {
+ return "for (const x = 0, c = 0; " + use + ";) {}"
+ }, TypeError,
+ function(use) {
+ return "for (const c = 0; ; " + use + ") {}"
+ }, TypeError,
+ function(use) {
+ return "for (const x = 0, c = 0; ; " + use + ") {}"
+ }, TypeError,
+ function(use) {
+ return "for (const c = 0; ;) { " + use + "; }"
+ }, TypeError,
+ function(use) {
+ return "for (const x = 0, c = 0; ;) { " + use + "; }"
+ }, TypeError,
+ function(use) {
+ return "for (const c in {a: 1}) { " + use + "; }"
+ }, TypeError,
+ function(use) {
+ return "for (const c of [1]) { " + use + "; }"
+ }, TypeError,
+ function(use) {
+ return "for (const x = (" + use + "), c = 0; ;) {}"
+ }, ReferenceError,
+ function(use) {
+ return "for (const c = (" + use + "); ;) {}"
+ }, ReferenceError,
+]
+
+let uses = [
+ 'c = 1',
+ 'c += 1',
+ '++c',
+ 'c--',
+];
+
+let declcontexts = [
+ function(decl) { return decl; },
+ function(decl) { return "eval(\'" + decl + "\')"; },
+ function(decl) { return "{ " + decl + " }"; },
+ function(decl) { return "(function() { " + decl + " })()"; },
+];
+
+let usecontexts = [
+ function(use) { return use; },
+ function(use) { return "eval(\"" + use + "\")"; },
+ function(use) { return "(function() { " + use + " })()"; },
+ function(use) { return "(function() { eval(\"" + use + "\"); })()"; },
+ function(use) { return "eval(\"(function() { " + use + "; })\")()"; },
+];
+
+function Test(program, error) {
+ program = "'use strict'; " + program;
+ try {
+ print(program, " // throw " + error.name);
+ eval(program);
+ } catch (e) {
+ assertInstanceof(e, error);
+ if (e instanceof TypeError) {
+ assertTrue(e.toString().indexOf("Assignment to constant variable") >= 0);
+ }
+ return;
+ }
+ assertUnreachable();
+}
+
+for (var d = 0; d < decls.length; d += 2) {
+ for (var u = 0; u < uses.length; ++u) {
+ for (var o = 0; o < declcontexts.length; ++o) {
+ for (var i = 0; i < usecontexts.length; ++i) {
+ Test(declcontexts[o](decls[d](usecontexts[i](uses[u]))), decls[d + 1]);
+ }
+ }
+ }
+}
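
Two representative programs generated by the driver above, taken from the
decls table, show the error split being asserted: mutating an initialized
const binding throws TypeError, while assigning to the binding inside its
own initializer hits the temporal dead zone and throws ReferenceError. A
stand-alone sketch (errorNameFor is a hypothetical helper, not part of the
test):

    function errorNameFor(program) {
      try {
        eval("'use strict'; " + program);
      } catch (e) {
        return e.constructor.name;
      }
      return "no error";
    }

    errorNameFor("const c = 1; c = 1;");  // "TypeError": assignment to const
    errorNameFor("const c = (c = 1);");   // "ReferenceError": c still in TDZ
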
diff --git a/deps/v8/test/mjsunit/harmony/block-early-errors.js b/deps/v8/test/mjsunit/es6/block-early-errors.js
index 8ed5ea84ec..bf24942bb1 100644
--- a/deps/v8/test/mjsunit/harmony/block-early-errors.js
+++ b/deps/v8/test/mjsunit/es6/block-early-errors.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
-
function CheckException(e) {
var string = e.toString();
assertInstanceof(e, SyntaxError);
diff --git a/deps/v8/test/mjsunit/harmony/block-for.js b/deps/v8/test/mjsunit/es6/block-for.js
index 110f1ccf45..b91af0116c 100644
--- a/deps/v8/test/mjsunit/harmony/block-for.js
+++ b/deps/v8/test/mjsunit/es6/block-for.js
@@ -24,9 +24,6 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-scoping
-
"use strict";
function props(x) {
@@ -147,7 +144,7 @@ function closure_in_for_cond() {
assertEquals(k, a[k]());
}
}
-closure_in_for_next();
+closure_in_for_cond();
function closure_in_for_next() {
diff --git a/deps/v8/test/mjsunit/harmony/block-leave.js b/deps/v8/test/mjsunit/es6/block-leave.js
index 87d35b396d..338631b76e 100644
--- a/deps/v8/test/mjsunit/harmony/block-leave.js
+++ b/deps/v8/test/mjsunit/es6/block-leave.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
-
"use strict";
// We want to test the context chain shape. In each of the tests cases
diff --git a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js b/deps/v8/test/mjsunit/es6/block-let-crankshaft.js
index e8e00b200e..9cfdf847fc 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
+++ b/deps/v8/test/mjsunit/es6/block-let-crankshaft.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping --allow-natives-syntax
+// Flags: --allow-natives-syntax
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/block-let-declaration.js b/deps/v8/test/mjsunit/es6/block-let-declaration.js
index 44a0049a44..5fbb12824b 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-declaration.js
+++ b/deps/v8/test/mjsunit/es6/block-let-declaration.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
-
// Test let declarations in various settings.
"use strict";
@@ -70,8 +68,8 @@ TestLocalThrows("do let x; while (false)", SyntaxError);
TestLocalThrows("while (false) let x;", SyntaxError);
TestLocalThrows("label: let x;", SyntaxError);
TestLocalThrows("for (;false;) let x;", SyntaxError);
-TestLocalThrows("switch (true) { case true: let x; }", SyntaxError);
-TestLocalThrows("switch (true) { default: let x; }", SyntaxError);
+TestLocalDoesNotThrow("switch (true) { case true: let x; }");
+TestLocalDoesNotThrow("switch (true) { default: let x; }");
// Test const declarations with initialisers in statement positions.
TestLocalThrows("if (true) const x = 1;", SyntaxError);
@@ -80,8 +78,8 @@ TestLocalThrows("do const x = 1; while (false)", SyntaxError);
TestLocalThrows("while (false) const x = 1;", SyntaxError);
TestLocalThrows("label: const x = 1;", SyntaxError);
TestLocalThrows("for (;false;) const x = 1;", SyntaxError);
-TestLocalThrows("switch (true) { case true: const x = 1; }", SyntaxError);
-TestLocalThrows("switch (true) { default: const x = 1; }", SyntaxError);
+TestLocalDoesNotThrow("switch (true) { case true: const x = 1; }");
+TestLocalDoesNotThrow("switch (true) { default: const x = 1; }");
// Test const declarations without initialisers.
TestLocalThrows("const x;", SyntaxError);
@@ -149,11 +147,11 @@ function f() {
f();
// Test function declarations in statement position in strict mode.
-TestLocalThrows("function f() { if (true) function g() {}", SyntaxError);
-TestLocalThrows("function f() { if (true) {} else function g() {}", SyntaxError);
-TestLocalThrows("function f() { do function g() {} while (false)", SyntaxError);
-TestLocalThrows("function f() { while (false) function g() {}", SyntaxError);
-TestLocalThrows("function f() { label: function g() {}", SyntaxError);
-TestLocalThrows("function f() { for (;false;) function g() {}", SyntaxError);
-TestLocalThrows("function f() { switch (true) { case true: function g() {} }", SyntaxError);
-TestLocalThrows("function f() { switch (true) { default: function g() {} }", SyntaxError);
+TestLocalThrows("function f() { if (true) function g() {} }", SyntaxError);
+TestLocalThrows("function f() { if (true) {} else function g() {} }", SyntaxError);
+TestLocalThrows("function f() { do function g() {} while (false) }", SyntaxError);
+TestLocalThrows("function f() { while (false) function g() {} }", SyntaxError);
+TestLocalThrows("function f() { label: function g() {} }", SyntaxError);
+TestLocalThrows("function f() { for (;false;) function g() {} }", SyntaxError);
+TestLocalDoesNotThrow("function f() { switch (true) { case true: function g() {} } }");
+TestLocalDoesNotThrow("function f() { switch (true) { default: function g() {} } }");
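
The flipped expectations above reflect that in ES6 the case clauses of a
switch together form a single block scope, so lexical and function
declarations directly inside a clause now parse. A sketch of what is now
accepted (in strict code, as in this test):

    switch (true) {
      case true:
        let x = 1;   // legal: declared in the switch's shared block scope
        break;
      default:
        // x is visible here too (same block), but reading it on a path
        // where the declaration has not run would throw due to the TDZ.
        break;
    }
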
diff --git a/deps/v8/test/mjsunit/harmony/block-let-semantics.js b/deps/v8/test/mjsunit/es6/block-let-semantics.js
index a37b795b0a..b0a826a007 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-semantics.js
+++ b/deps/v8/test/mjsunit/es6/block-let-semantics.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
-
"use strict";
// Test temporal dead zone semantics of let bound variables in
diff --git a/deps/v8/test/mjsunit/harmony/block-non-strict-errors.js b/deps/v8/test/mjsunit/es6/block-non-strict-errors.js
index 11fa5c6a52..48cac21141 100644
--- a/deps/v8/test/mjsunit/harmony/block-non-strict-errors.js
+++ b/deps/v8/test/mjsunit/es6/block-non-strict-errors.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-scoping --harmony-classes
+// Flags: --harmony-classes
function CheckError(source) {
var exception = null;
diff --git a/deps/v8/test/mjsunit/harmony/block-scoping.js b/deps/v8/test/mjsunit/es6/block-scoping.js
index 001d9fbfd5..5f481b8bf2 100644
--- a/deps/v8/test/mjsunit/harmony/block-scoping.js
+++ b/deps/v8/test/mjsunit/es6/block-scoping.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --harmony-scoping
+// Flags: --allow-natives-syntax
// Test functionality of block scopes.
"use strict";
@@ -101,7 +101,8 @@ function f4(one) {
assertEquals(4, eval('z'));
assertEquals(5, eval('u'));
assertEquals(6, eval('v'));
- };
+ }
+ f();
}
}
f4(1);
@@ -122,7 +123,8 @@ function f5(one) {
assertEquals(4, z);
assertEquals(5, u);
assertEquals(6, v);
- };
+ }
+ f();
}
}
f5(1);
diff --git a/deps/v8/test/mjsunit/es6/collections.js b/deps/v8/test/mjsunit/es6/collections.js
index 92cd087839..888b6863c1 100644
--- a/deps/v8/test/mjsunit/es6/collections.js
+++ b/deps/v8/test/mjsunit/es6/collections.js
@@ -51,6 +51,21 @@ function TestValidMapCalls(m) {
assertDoesNotThrow(function () { m.set(new Object) });
assertDoesNotThrow(function () { m.has(new Object) });
assertDoesNotThrow(function () { m.delete(new Object) });
+ assertDoesNotThrow(function () { m.get(undefined) });
+ assertDoesNotThrow(function () { m.get(null) });
+ assertDoesNotThrow(function () { m.get(0) });
+ assertDoesNotThrow(function () { m.get('a-key') });
+ assertDoesNotThrow(function () { m.get(Symbol()) });
+ assertDoesNotThrow(function () { m.has(undefined) });
+ assertDoesNotThrow(function () { m.has(null) });
+ assertDoesNotThrow(function () { m.has(0) });
+ assertDoesNotThrow(function () { m.has('a-key') });
+ assertDoesNotThrow(function () { m.has(Symbol()) });
+ assertDoesNotThrow(function () { m.delete(undefined) });
+ assertDoesNotThrow(function () { m.delete(null) });
+ assertDoesNotThrow(function () { m.delete(0) });
+ assertDoesNotThrow(function () { m.delete('a-key') });
+ assertDoesNotThrow(function () { m.delete(Symbol()) });
}
TestValidMapCalls(new Map);
TestValidMapCalls(new WeakMap);
@@ -58,14 +73,11 @@ TestValidMapCalls(new WeakMap);
// Test invalid getter and setter calls for WeakMap only
function TestInvalidCalls(m) {
- assertThrows(function () { m.get(undefined) }, TypeError);
assertThrows(function () { m.set(undefined, 0) }, TypeError);
- assertThrows(function () { m.get(null) }, TypeError);
assertThrows(function () { m.set(null, 0) }, TypeError);
- assertThrows(function () { m.get(0) }, TypeError);
assertThrows(function () { m.set(0, 0) }, TypeError);
- assertThrows(function () { m.get('a-key') }, TypeError);
assertThrows(function () { m.set('a-key', 0) }, TypeError);
+ assertThrows(function () { m.set(Symbol(), 0) }, TypeError);
}
TestInvalidCalls(new WeakMap);
@@ -73,57 +85,79 @@ TestInvalidCalls(new WeakMap);
// Test expected behavior for Sets and WeakSets
function TestSet(set, key) {
assertFalse(set.has(key));
- assertSame(set, set.add(key));
- assertTrue(set.has(key));
- assertTrue(set.delete(key));
+ assertFalse(set.delete(key));
+ if (typeof key === 'object' && !(set instanceof WeakSet)) {
+ assertSame(set, set.add(key));
+ assertTrue(set.has(key));
+ assertTrue(set.delete(key));
+ }
assertFalse(set.has(key));
assertFalse(set.delete(key));
assertFalse(set.has(key));
}
function TestSetBehavior(set) {
+ // Fill
for (var i = 0; i < 20; i++) {
TestSet(set, new Object);
TestSet(set, i);
TestSet(set, i / 100);
TestSet(set, 'key-' + i);
+ TestSet(set, Symbol(i));
}
- var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
+
+ var keys = [
+ -0, +0, 1, 1/3, 10, +Infinity, -Infinity, NaN, true, false, null, undefined,
+ 'x', Symbol(), {}, function(){}
+ ];
for (var i = 0; i < keys.length; i++) {
TestSet(set, keys[i]);
}
}
TestSetBehavior(new Set);
-TestSet(new WeakSet, new Object);
+TestSetBehavior(new WeakSet);
// Test expected mapping behavior for Maps and WeakMaps
function TestMapping(map, key, value) {
- assertSame(map, map.set(key, value));
- assertSame(value, map.get(key));
+ assertFalse(map.has(key));
+ assertSame(undefined, map.get(key));
+ assertFalse(map.delete(key));
+ if (typeof key === 'object' && !(map instanceof WeakMap)) {
+ assertSame(map, map.set(key, value));
+ assertSame(value, map.get(key));
+ assertTrue(map.has(key));
+ assertTrue(map.delete(key));
+ }
+ assertFalse(map.has(key));
+ assertSame(undefined, map.get(key));
+ assertFalse(map.delete(key));
+ assertFalse(map.has(key));
+ assertSame(undefined, map.get(key));
}
-function TestMapBehavior1(m) {
+function TestMapBehavior(m) {
+ // Fill
TestMapping(m, new Object, 23);
TestMapping(m, new Object, 'the-value');
TestMapping(m, new Object, new Object);
-}
-TestMapBehavior1(new Map);
-TestMapBehavior1(new WeakMap);
-
-
-// Test expected mapping behavior for Maps only
-function TestMapBehavior2(m) {
for (var i = 0; i < 20; i++) {
TestMapping(m, i, new Object);
TestMapping(m, i / 10, new Object);
TestMapping(m, 'key-' + i, new Object);
+ TestMapping(m, Symbol(i), new Object);
}
- // -0 is handled in TestMinusZeroMap
- var keys = [ 0, +Infinity, -Infinity, true, false, null, undefined ];
+
+ var keys = [
+ -0, +0, 1, 1/3, 10, +Infinity, -Infinity, NaN, true, false, null, undefined,
+ 'x', Symbol(), {}, function(){}
+ ];
for (var i = 0; i < keys.length; i++) {
+ TestMapping(m, keys[i], 23);
+ TestMapping(m, keys[i], 'the-value');
TestMapping(m, keys[i], new Object);
}
}
-TestMapBehavior2(new Map);
+TestMapBehavior(new Map);
+TestMapBehavior(new WeakMap);
// Test expected querying behavior of Maps and WeakMaps
@@ -132,8 +166,6 @@ function TestQuery(m) {
var values = [ 'x', 0, +Infinity, -Infinity, true, false, null, undefined ];
for (var i = 0; i < values.length; i++) {
TestMapping(m, key, values[i]);
- assertTrue(m.has(key));
- assertFalse(m.has(new Object));
}
}
TestQuery(new Map);
@@ -144,7 +176,6 @@ TestQuery(new WeakMap);
function TestDelete(m) {
var key = new Object;
TestMapping(m, key, 'to-be-deleted');
- assertTrue(m.delete(key));
assertFalse(m.delete(key));
assertFalse(m.delete(new Object));
assertSame(m.get(key), undefined);
@@ -1190,8 +1221,9 @@ function TestSetConstructorIterableValue(ctor) {
// Strict mode is required to prevent implicit wrapping in the getter.
Object.defineProperty(Number.prototype, Symbol.iterator, {
get: function() {
- assertEquals('object', typeof this);
+ assertEquals('number', typeof this);
return function() {
+ assertEquals('number', typeof this);
return oneAndTwo.keys();
};
},
@@ -1380,8 +1412,9 @@ function TestMapConstructorIterableValue(ctor) {
// Strict mode is required to prevent implicit wrapping in the getter.
Object.defineProperty(Number.prototype, Symbol.iterator, {
get: function() {
- assertEquals('object', typeof this);
+ assertEquals('number', typeof this);
return function() {
+ assertEquals('number', typeof this);
return oneAndTwo.entries();
};
},
@@ -1406,3 +1439,38 @@ TestCollectionToString(Map);
TestCollectionToString(Set);
TestCollectionToString(WeakMap);
TestCollectionToString(WeakSet);
+
+
+function TestConstructorOrderOfAdderIterator(ctor, adderName) {
+ var iterable = new Map();
+ iterable.set({}, {});
+ iterable.set({}, {});
+ var iterableFunction = iterable[Symbol.iterator];
+ Object.defineProperty(iterable, Symbol.iterator, {
+ get: function() {
+ log += 'iterator';
+ return iterableFunction;
+ }
+ });
+
+ var log = '';
+ var adderFunction = ctor.prototype[adderName];
+
+ Object.defineProperty(ctor.prototype, adderName, {
+ get: function() {
+ log += adderName;
+ return adderFunction;
+ }
+ });
+
+ new ctor(iterable);
+ assertEquals(adderName + 'iterator', log);
+
+ Object.defineProperty(ctor.prototype, adderName, {
+ value: adderFunction
+ });
+}
+TestConstructorOrderOfAdderIterator(Map, 'set');
+TestConstructorOrderOfAdderIterator(Set, 'add');
+TestConstructorOrderOfAdderIterator(WeakMap, 'set');
+TestConstructorOrderOfAdderIterator(WeakSet, 'add');
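
TestConstructorOrderOfAdderIterator pins down an observable ordering in the
collection constructors: the adder method (set or add) is read from the
prototype before the argument's @@iterator is fetched. A compact way to
watch that order for Map, mirroring the technique the test itself uses:

    var order = [];
    var iterable = [[1, 'a'], [2, 'b']];
    var arrayIterator = iterable[Symbol.iterator];
    Object.defineProperty(iterable, Symbol.iterator, {
      get: function() { order.push('iterator'); return arrayIterator; }
    });

    var setMethod = Map.prototype.set;
    Object.defineProperty(Map.prototype, 'set', {
      configurable: true,
      get: function() { order.push('set'); return setMethod; }
    });

    new Map(iterable);
    // order is ['set', 'iterator']: the adder is looked up first.
    Object.defineProperty(Map.prototype, 'set', { value: setMethod });
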
diff --git a/deps/v8/test/mjsunit/harmony/debug-blockscopes.js b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
index 8180377e6d..9f5d13e900 100644
--- a/deps/v8/test/mjsunit/harmony/debug-blockscopes.js
+++ b/deps/v8/test/mjsunit/es6/debug-blockscopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-scoping
+// Flags: --expose-debug-as debug
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
diff --git a/deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js b/deps/v8/test/mjsunit/es6/debug-evaluate-blockscopes.js
index d133cc07ce..e24ca78315 100644
--- a/deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js
+++ b/deps/v8/test/mjsunit/es6/debug-evaluate-blockscopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-scoping
+// Flags: --expose-debug-as debug
// Test debug evaluation for functions without local context, but with
// nested catch contexts.
diff --git a/deps/v8/test/mjsunit/harmony/debug-function-scopes.js b/deps/v8/test/mjsunit/es6/debug-function-scopes.js
index 1b380c2b84..699bd5343d 100644
--- a/deps/v8/test/mjsunit/harmony/debug-function-scopes.js
+++ b/deps/v8/test/mjsunit/es6/debug-function-scopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-debug-as debug --harmony-scoping
+// Flags: --expose-debug-as debug
"use strict";
let top_level_let = 255;
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js b/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js
new file mode 100644
index 0000000000..918ae2a2e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-aborted-try-finally.js
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events for a promise whose executor throws inside a
+// try-finally that exits via break, discarding the exception. We expect
+// the PromiseEvent log to show the promise being created and resolved.
+
+Debug = debug.Debug;
+
+var events = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
+}
+
+Debug.setListener(listener);
+
+var p = new Promise(function(resolve, reject) {
+ do {
+ try {
+ throw new Error("reject");
+ } finally {
+ break; // No rethrow.
+ }
+ } while (false);
+ resolve();
+});
+
+assertEquals([0 /* create */, 1 /* resolve */], events);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js b/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js
new file mode 100644
index 0000000000..298201f103
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-promises/resolve-after-try-catch.js
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events for a promise whose executor throws an exception
+// that is caught locally. We expect the PromiseEvent log to show the
+// promise being created and resolved.
+
+Debug = debug.Debug;
+
+var events = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
+}
+
+Debug.setListener(listener);
+
+var p = new Promise(function (resolve, reject) {
+ try {
+ throw new Error("reject");
+ } catch (e) {
+ }
+ resolve();
+});
+
+assertEquals([0 /* create */, 1 /* resolve */], events);
diff --git a/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js b/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js
new file mode 100644
index 0000000000..b1e2ff98e1
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-promises/rethrow-in-try-finally.js
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events for a promise whose executor throws inside a
+// try-finally without a catch, implicitly rethrowing. We expect the
+// PromiseEvent log to show creation followed by the rethrown status.
+
+Debug = debug.Debug;
+
+var events = [];
+
+function listener(event, exec_state, event_data, data) {
+ if (event == Debug.DebugEvent.PromiseEvent) events.push(event_data.status());
+}
+
+Debug.setListener(listener);
+
+var p = new Promise(function(resolve, reject) {
+ try {
+ throw new Error("reject");
+ } finally {
+ // Implicit rethrow.
+ }
+ resolve();
+});
+
+assertEquals([0 /* create */, -1 /* rethrown */], events);
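
The two try-finally tests above turn on a corner of plain control flow, not
of promises: an abrupt completion of the finally block (the break) replaces
the pending exception, while a finally block that completes normally lets
the exception keep propagating. The same behavior without the debugger:

    function swallows() {
      do {
        try {
          throw new Error("reject");
        } finally {
          break;                 // discards the pending exception
        }
      } while (false);
      return "resolved";
    }

    function rethrows() {
      try {
        throw new Error("reject");
      } finally {
        // completes normally: the exception continues to propagate
      }
      return "unreachable";
    }

    swallows();                            // "resolved"
    try { rethrows(); } catch (e) {}       // catches Error: reject
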
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-promises.js b/deps/v8/test/mjsunit/es6/debug-stepin-promises.js
new file mode 100644
index 0000000000..8548a2badd
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/debug-stepin-promises.js
@@ -0,0 +1,65 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax --noalways-opt
+// Tests stepping into and through Promises.
+
+Debug = debug.Debug
+var exception = null;
+var break_count = 0;
+var expected_breaks = -1;
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
+ if (!break_count) {
+ // Count number of expected breakpoints in this source file.
+ var source_text = exec_state.frame(0).func().script().source();
+ expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
+ print("Expected breaks: " + expected_breaks);
+ }
+ var source = exec_state.frame(0).sourceLineText();
+ print("paused at: " + source);
+ assertTrue(source.indexOf("// Break " + break_count + ".") > 0,
+ "Unexpected pause at: " + source + "\n" +
+ "Expected: // Break " + break_count + ".");
+ ++break_count;
+ if (break_count !== expected_breaks) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ }
+ }
+ } catch(e) {
+ exception = e;
+ print(e, e.stack);
+ }
+};
+
+Debug.setListener(listener);
+
+Promise.resolve(42)
+ .then(
+ function f0() {
+ debugger; // Break 0.
+ } // Break 1.
+ )
+ .then(callback)
+ .then(callback.bind(null))
+ .then(Object)
+ .then(callback.bind(null).bind(null))
+ .then(finalize)
+ .catch(function(err) {
+ %AbortJS("FAIL: " + err);
+ });
+
+function callback(x) {
+ return x; // Break 2. // Break 4. // Break 6.
+} // Break 3. // Break 5. // Break 7.
+
+function finalize() {
+ assertNull(exception); // Break 8.
+ assertEquals(expected_breaks, break_count);
+
+ Debug.setListener(null);
+}
diff --git a/deps/v8/test/mjsunit/harmony/empty-for.js b/deps/v8/test/mjsunit/es6/empty-for.js
index 02211260ff..dad892d328 100644
--- a/deps/v8/test/mjsunit/harmony/empty-for.js
+++ b/deps/v8/test/mjsunit/es6/empty-for.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
-
"use strict";
function for_const() {
diff --git a/deps/v8/test/mjsunit/es6/function-length-configurable.js b/deps/v8/test/mjsunit/es6/function-length-configurable.js
new file mode 100644
index 0000000000..e5b51aba20
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/function-length-configurable.js
@@ -0,0 +1,119 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function getStrictF() {
+ 'use strict';
+ return function f(x) {};
+}
+
+
+function getSloppyF() {
+ return function f(x) {};
+}
+
+
+function getStrictGenerator() {
+ 'use strict';
+ return function* f(x) {};
+}
+
+
+function getSloppyGenerator() {
+ return function* f(x) {};
+}
+
+
+function test(testFunction) {
+ testFunction(getStrictF());
+ testFunction(getSloppyF());
+ testFunction(getStrictGenerator());
+ testFunction(getSloppyGenerator());
+}
+
+
+function testDescriptor(f) {
+ var descr = Object.getOwnPropertyDescriptor(f, 'length');
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertEquals(1, descr.value);
+ assertFalse(descr.writable);
+}
+test(testDescriptor);
+
+
+function testSet(f) {
+ f.length = 2;
+ assertEquals(1, f.length);
+}
+test(testSet);
+
+
+function testSetStrict(f) {
+ 'use strict';
+ assertThrows(function() {
+ f.length = 2;
+ }, TypeError);
+}
+test(testSetStrict);
+
+
+function testReconfigureAsDataProperty(f) {
+ Object.defineProperty(f, 'length', {
+ value: 2,
+ });
+ assertEquals(2, f.length);
+ Object.defineProperty(f, 'length', {
+ writable: true
+ });
+ f.length = 3;
+ assertEquals(3, f.length);
+
+ f.length = 42;
+ assertEquals(42, f.length);
+}
+test(testReconfigureAsDataProperty);
+
+
+function testReconfigureAsAccessorProperty(f) {
+ var length = 2;
+ Object.defineProperty(f, 'length', {
+ get: function() { return length; },
+ set: function(v) { length = v; }
+ });
+ assertEquals(2, f.length);
+ f.length = 3;
+ assertEquals(3, f.length);
+}
+test(testReconfigureAsAccessorProperty);
+
+
+(function testSetOnInstance() {
+ // This needs to come before testDelete below
+ assertTrue(Function.prototype.hasOwnProperty('length'));
+
+ function f() {}
+ delete f.length;
+ assertEquals(0, f.length);
+
+ f.length = 42;
+ assertEquals(0, f.length); // non-writable prototype property.
+ assertFalse(f.hasOwnProperty('length'));
+
+ Object.defineProperty(Function.prototype, 'length', {writable: true});
+
+ f.length = 123;
+ assertTrue(f.hasOwnProperty('length'));
+ assertEquals(123, f.length);
+})();
+
+
+(function testDelete() {
+ function f(x) {}
+ assertTrue(delete f.length);
+ assertFalse(f.hasOwnProperty('length'));
+ assertEquals(0, f.length);
+
+ assertTrue(delete Function.prototype.length);
+ assertEquals(undefined, f.length);
+})();
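
The testSetOnInstance/testDelete pair also documents the lookup chain: once
the configurable own 'length' is deleted, reads fall through to
Function.prototype.length, which is itself non-writable until reconfigured.
In short (assuming a fresh environment where the prototype has not been
modified):

    function g(a, b) {}
    g.length;           // 2: own property, configurable but non-writable
    delete g.length;    // allowed, since the property is configurable
    g.length;           // 0: inherited from Function.prototype
    g.length = 42;      // silently ignored in sloppy mode (non-writable)
    g.length;           // still 0
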
diff --git a/deps/v8/test/mjsunit/es6/function-name-configurable.js b/deps/v8/test/mjsunit/es6/function-name-configurable.js
new file mode 100644
index 0000000000..f0ff406da8
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/function-name-configurable.js
@@ -0,0 +1,115 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function getStrictF() {
+ 'use strict';
+ return function f() {};
+}
+
+
+function getSloppyF() {
+ return function f() {};
+}
+
+
+function test(testFunction) {
+ testFunction(getStrictF());
+ testFunction(getSloppyF());
+}
+
+
+function testDescriptor(f) {
+ var descr = Object.getOwnPropertyDescriptor(f, 'name');
+ assertTrue(descr.configurable);
+ assertFalse(descr.enumerable);
+ assertEquals('f', descr.value);
+ assertFalse(descr.writable);
+}
+test(testDescriptor);
+
+
+function testSet(f) {
+ f.name = 'g';
+ assertEquals('f', f.name);
+}
+test(testSet);
+
+
+function testSetStrict(f) {
+ 'use strict';
+ assertThrows(function() {
+ f.name = 'g';
+ }, TypeError);
+}
+test(testSetStrict);
+
+
+function testReconfigureAsDataProperty(f) {
+ Object.defineProperty(f, 'name', {
+ value: 'g',
+ });
+ assertEquals('g', f.name);
+ Object.defineProperty(f, 'name', {
+ writable: true
+ });
+ f.name = 'h';
+ assertEquals('h', f.name);
+
+ f.name = 42;
+ assertEquals(42, f.name);
+}
+test(testReconfigureAsDataProperty);
+
+
+function testReconfigureAsAccessorProperty(f) {
+ var name = 'g';
+ Object.defineProperty(f, 'name', {
+ get: function() { return name; },
+ set: function(v) { name = v; }
+ });
+ assertEquals('g', f.name);
+ f.name = 'h';
+ assertEquals('h', f.name);
+}
+test(testReconfigureAsAccessorProperty);
+
+
+function testFunctionToString(f) {
+ Object.defineProperty(f, 'name', {
+ value: {toString: function() { assertUnreachable(); }},
+ });
+ assertEquals('function f() {}', f.toString());
+}
+test(testFunctionToString);
+
+
+(function testSetOnInstance() {
+ // This needs to come before testDelete below
+ assertTrue(Function.prototype.hasOwnProperty('name'));
+
+ function f() {}
+ delete f.name;
+ assertEquals('Empty', f.name);
+
+ f.name = 42;
+ assertEquals('Empty', f.name); // non-writable prototype property.
+ assertFalse(f.hasOwnProperty('name'));
+
+ Object.defineProperty(Function.prototype, 'name', {writable: true});
+
+ f.name = 123;
+ assertTrue(f.hasOwnProperty('name'));
+ assertEquals(123, f.name);
+})();
+
+
+(function testDelete() {
+ function f() {}
+ assertTrue(delete f.name);
+ assertFalse(f.hasOwnProperty('name'));
+ assertEquals('Empty', f.name);
+
+ assertTrue(delete Function.prototype.name);
+ assertEquals(undefined, f.name);
+})();
diff --git a/deps/v8/test/mjsunit/es6/generators-debug-liveedit.js b/deps/v8/test/mjsunit/es6/generators-debug-liveedit.js
index 6f0c443afc..987a42c41c 100644
--- a/deps/v8/test/mjsunit/es6/generators-debug-liveedit.js
+++ b/deps/v8/test/mjsunit/es6/generators-debug-liveedit.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --allow-natives-syntax
var Debug = debug.Debug;
var LiveEdit = Debug.LiveEdit;
@@ -54,7 +54,7 @@ function patch(fun, from, to) {
print("Change log: " + JSON.stringify(log) + "\n");
}
}
- Debug.ExecuteInDebugContext(debug, false);
+ %ExecuteInDebugContext(debug);
}
// Try to edit a MakeGenerator while it's running, then again while it's
diff --git a/deps/v8/test/mjsunit/es6/generators-objects.js b/deps/v8/test/mjsunit/es6/generators-objects.js
index 8039ca8bb1..5112443ea0 100644
--- a/deps/v8/test/mjsunit/es6/generators-objects.js
+++ b/deps/v8/test/mjsunit/es6/generators-objects.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping --allow-natives-syntax --harmony-tostring
+// Flags: --allow-natives-syntax --harmony-tostring
// Test instantiations of generators.
diff --git a/deps/v8/test/mjsunit/es6/indexed-integer-exotics.js b/deps/v8/test/mjsunit/es6/indexed-integer-exotics.js
new file mode 100644
index 0000000000..7aefd78c4f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/indexed-integer-exotics.js
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+Object.prototype["10"] = "unreachable";
+Object.prototype["7"] = "unreachable";
+Object.prototype["-1"] = "unreachable";
+Object.prototype["-0"] = "unreachable";
+Object.prototype["4294967296"] = "unreachable";
+
+var array = new Int32Array(10);
+
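+// Note: typed arrays are integer-indexed exotic objects in ES6, so keys
+// that parse as canonical numeric indices (including "-0", "-1" and
+// "4294967296") are handled by the array itself; out-of-range reads
+// yield undefined and never reach the polluted prototype.
+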
+function check() {
+ for (var i = 0; i < 4; i++) {
+ assertEquals(undefined, array["-1"]);
+ assertEquals(undefined, array["-0"]);
+ assertEquals(undefined, array["10"]);
+ assertEquals(undefined, array["4294967296"]);
+ }
+ assertEquals("unreachable", array.__proto__["-1"]);
+ assertEquals("unreachable", array.__proto__["-0"]);
+ assertEquals("unreachable", array.__proto__["10"]);
+ assertEquals("unreachable", array.__proto__["4294967296"]);
+}
+
+check();
+
+array["-1"] = "unreachable";
+array["-0"] = "unreachable";
+array["10"] = "unreachable";
+array["4294967296"] = "unreachable";
+
+check();
+
+delete array["-0"];
+delete array["-1"];
+delete array["10"];
+delete array["4294967296"];
+
+assertEquals(undefined, Object.getOwnPropertyDescriptor(array, "-1"));
+assertEquals(undefined, Object.getOwnPropertyDescriptor(array, "-0"));
+assertEquals(undefined, Object.getOwnPropertyDescriptor(array, "10"));
+assertEquals(undefined, Object.getOwnPropertyDescriptor(array, "4294967296"));
+assertEquals(10, Object.keys(array).length);
+
+check();
+
+function f() { return array["-1"]; }
+
+for (var i = 0; i < 3; i++) {
+ assertEquals(undefined, f());
+}
+%OptimizeFunctionOnNextCall(f);
+assertEquals(undefined, f());
+
+Object.defineProperty(new Int32Array(), "-1", {'value': 1});
+Object.defineProperty(new Int32Array(), "-0", {'value': 1});
+Object.defineProperty(new Int32Array(), "-10", {'value': 1});
+Object.defineProperty(new Int32Array(), "4294967296", {'value': 1});
+
+check();
diff --git a/deps/v8/test/mjsunit/es6/iteration-semantics.js b/deps/v8/test/mjsunit/es6/iteration-semantics.js
index 544c94d915..f29e6e011b 100644
--- a/deps/v8/test/mjsunit/es6/iteration-semantics.js
+++ b/deps/v8/test/mjsunit/es6/iteration-semantics.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping --harmony-proxies
+// Flags: --harmony-proxies
// Test for-of semantics.
@@ -200,9 +200,11 @@ assertEquals([undefined, 1, 2, 3],
// Done.
{ value: 4, done: 42 }])));
// Results that are not objects.
-assertEquals([undefined, undefined, undefined],
- fold(append, [],
- results([10, "foo", /qux/, { value: 37, done: true }])));
+assertThrows(function() {
+ assertEquals([undefined, undefined, undefined],
+ fold(append, [],
+ results([10, "foo", /qux/, { value: 37, done: true }])));
+}, TypeError);
// Getters (shudder).
assertEquals([1, 2],
fold(append, [],
@@ -334,3 +336,25 @@ function poison_proxy_after(iterable, n) {
}));
}
assertEquals(45, fold(sum, 0, poison_proxy_after(integers_until(10), 10)));
+
+
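+// Note: ES6 requires every iterator result to be an object; a primitive
+// result makes the iteration protocol throw a TypeError (which is why
+// the earlier assertion above was changed to assertThrows). The helper
+// below also checks the TypeError message and that values produced
+// before the bad result are kept.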
+function test_iterator_result_object_non_object(value, descr) {
+ var arr = [];
+ var ex;
+ var message = 'Iterator result ' + (descr || value) + ' is not an object';
+ try {
+ fold(append, arr,
+ results([{value: 1}, {}, value, {value: 2}, {done: true}]));
+ } catch (e) {
+ ex = e;
+ }
+ assertInstanceof(ex, TypeError);
+ assertEquals(message, ex.message);
+ assertArrayEquals([1, undefined], arr);
+}
+test_iterator_result_object_non_object(null);
+test_iterator_result_object_non_object(undefined);
+test_iterator_result_object_non_object(42);
+test_iterator_result_object_non_object('abc');
+test_iterator_result_object_non_object(false);
+test_iterator_result_object_non_object(Symbol('x'), 'Symbol(x)');
diff --git a/deps/v8/test/mjsunit/es6/iteration-syntax.js b/deps/v8/test/mjsunit/es6/iteration-syntax.js
index 4be94c5db4..c6a7d477bc 100644
--- a/deps/v8/test/mjsunit/es6/iteration-syntax.js
+++ b/deps/v8/test/mjsunit/es6/iteration-syntax.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
-
// Test for-of syntax.
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/map-minus-zero.js b/deps/v8/test/mjsunit/es6/map-minus-zero.js
new file mode 100644
index 0000000000..f9f397ec5c
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/map-minus-zero.js
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+var map = new Map();
+
+var objectKey = {};
+var stringKey = 'keykeykey';
+var numberKey = 42.24;
+var booleanKey = true;
+var undefinedKey = undefined;
+var nullKey = null;
+var nanKey = NaN;
+var zeroKey = 0;
+var minusZeroKey = -0;
+
+assertEquals(0, map.size);
+
+map.set(objectKey, 'aaa');
+map.set(stringKey, 'bbb');
+map.set(numberKey, 'ccc');
+map.set(booleanKey, 'ddd');
+map.set(undefinedKey, 'eee');
+map.set(nullKey, 'fff');
+map.set(nanKey, 'ggg');
+map.set(zeroKey, 'hhh');
+
+assertEquals(8, map.size);
+
+assertEquals('aaa', map.get(objectKey));
+assertEquals('bbb', map.get(stringKey));
+assertEquals('ccc', map.get(numberKey));
+assertEquals('ddd', map.get(booleanKey));
+assertEquals('eee', map.get(undefinedKey));
+assertEquals('fff', map.get(nullKey));
+assertEquals('ggg', map.get(nanKey));
+assertEquals('hhh', map.get(zeroKey));
+
+assertEquals(undefined, map.get({}));
+assertEquals('bbb', map.get('keykeykey'));
+assertEquals('ccc', map.get(42.24));
+assertEquals('ddd', map.get(true));
+assertEquals('eee', map.get(undefined));
+assertEquals('fff', map.get(null));
+assertEquals('ggg', map.get(NaN));
+assertEquals('hhh', map.get(0));
+assertEquals('hhh', map.get(-0));
+assertEquals('hhh', map.get(1 / Infinity));
+assertEquals('hhh', map.get(-1 / Infinity));
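+
+// Note: Map compares keys with the ES6 SameValueZero algorithm: -0 is
+// normalized to +0 on insertion (and 1 / Infinity, -1 / Infinity
+// evaluate to those values), while NaN is considered equal to itself.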
diff --git a/deps/v8/test/mjsunit/es6/promises.js b/deps/v8/test/mjsunit/es6/promises.js
index 04059aa720..63b6d2f94a 100644
--- a/deps/v8/test/mjsunit/es6/promises.js
+++ b/deps/v8/test/mjsunit/es6/promises.js
@@ -32,6 +32,8 @@ var call = Function.prototype.call.call.bind(Function.prototype.call)
var observe = Object.observe;
var getOwnPropertyNames = Object.getOwnPropertyNames;
var defineProperty = Object.defineProperty;
+var numberPrototype = Number.prototype;
+var symbolIterator = Symbol.iterator;
(function() {
@@ -637,14 +639,6 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.all({}).chain(
- assertUnreachable,
- function(r) { assertAsync(r instanceof TypeError, "all/no-array") }
- )
- assertAsyncRan()
-})();
-
-(function() {
Promise.all([]).chain(
function(x) { assertAsync(x.length === 0, "all/resolve/empty") },
assertUnreachable
@@ -653,6 +647,45 @@ function assertAsyncDone(iteration) {
})();
(function() {
+ function testPromiseAllNonIterable(value) {
+ Promise.all(value).chain(
+ assertUnreachable,
+ function(r) {
+ assertAsync(r instanceof TypeError, 'all/non iterable');
+ });
+ assertAsyncRan();
+ }
+ testPromiseAllNonIterable(null);
+ testPromiseAllNonIterable(undefined);
+ testPromiseAllNonIterable({});
+ testPromiseAllNonIterable(42);
+})();
+
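+// Note: Promise.all accepts any iterable, not just arrays; the generator
+// below yields a still-pending promise that is resolved after the call,
+// and the result array preserves source order.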
+(function() {
+ var deferred = Promise.defer();
+ var p = deferred.promise;
+ function* f() {
+ yield 1;
+ yield p;
+ yield 3;
+ }
+ Promise.all(f()).chain(
+ function(x) {
+ assertAsync(x.length === 3, "all/resolve/iterable");
+ assertAsync(x[0] === 1, "all/resolve/iterable/0");
+ assertAsync(x[1] === 2, "all/resolve/iterable/1");
+ assertAsync(x[2] === 3, "all/resolve/iterable/2");
+ },
+ assertUnreachable);
+ deferred.resolve(2);
+ assertAsyncRan();
+ assertAsyncRan();
+ assertAsyncRan();
+ assertAsyncRan();
+})();
+
+
+(function() {
var deferred1 = Promise.defer()
var p1 = deferred1.promise
var deferred2 = Promise.defer()
@@ -706,6 +739,52 @@ function assertAsyncDone(iteration) {
assertAsyncRan()
})();
+
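+// Note: installing a Symbol.iterator getter on Number.prototype makes
+// primitive numbers iterable, so Promise.all(3) walks the values 0..2;
+// the get/func/next counters verify that each step of the iteration
+// protocol runs exactly once per element.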
+(function() {
+ 'use strict';
+ var getCalls = 0;
+ var funcCalls = 0;
+ var nextCalls = 0;
+ defineProperty(numberPrototype, symbolIterator, {
+ get: function() {
+ assertEquals('number', typeof this);
+ getCalls++;
+ return function() {
+ assertEquals('number', typeof this);
+ funcCalls++;
+ var n = this;
+ var i = 0;
+ return {
+ next() {
+ nextCalls++;
+ return {value: i++, done: i > n};
+ }
+ };
+ };
+ },
+ configurable: true
+ });
+
+ Promise.all(3).chain(
+ function(x) {
+ assertAsync(x.length === 3, "all/iterable/number/length");
+ assertAsync(x[0] === 0, "all/iterable/number/0");
+ assertAsync(x[1] === 1, "all/iterable/number/1");
+ assertAsync(x[2] === 2, "all/iterable/number/2");
+ },
+ assertUnreachable);
+ delete numberPrototype[symbolIterator];
+
+ assertEquals(1, getCalls);
+ assertEquals(1, funcCalls);
+ assertEquals(3 + 1, nextCalls); // + 1 for {done: true}
+ assertAsyncRan();
+ assertAsyncRan();
+ assertAsyncRan();
+ assertAsyncRan();
+})();
+
+
(function() {
Promise.race([]).chain(
assertUnreachable,
@@ -736,14 +815,6 @@ function assertAsyncDone(iteration) {
})();
(function() {
- Promise.race({}).chain(
- assertUnreachable,
- function(r) { assertAsync(r instanceof TypeError, "one/no-array") }
- )
- assertAsyncRan()
-})();
-
-(function() {
var deferred1 = Promise.defer()
var p1 = deferred1.promise
var deferred2 = Promise.defer()
@@ -804,6 +875,103 @@ function assertAsyncDone(iteration) {
assertAsyncRan()
})();
+
+(function() {
+ function testPromiseRaceNonIterable(value) {
+ Promise.race(value).chain(
+ assertUnreachable,
+ function(r) {
+ assertAsync(r instanceof TypeError, 'race/non iterable');
+ });
+ assertAsyncRan();
+ }
+ testPromiseRaceNonIterable(null);
+ testPromiseRaceNonIterable(undefined);
+ testPromiseRaceNonIterable({});
+ testPromiseRaceNonIterable(42);
+})();
+
+
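+// Note: Promise.race likewise consumes any iterable; the two
+// generator-backed cases below check that the first deferred to settle
+// wins, whether it resolves or rejects.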
+(function() {
+ var deferred1 = Promise.defer()
+ var p1 = deferred1.promise
+ var deferred2 = Promise.defer()
+ var p2 = deferred2.promise
+ var deferred3 = Promise.defer()
+ var p3 = deferred3.promise
+ function* f() {
+ yield p1;
+ yield p2;
+ yield p3;
+ }
+ Promise.race(f()).chain(
+ function(x) { assertAsync(x === 3, "race/iterable/resolve/reject") },
+ assertUnreachable
+ )
+ deferred3.resolve(3)
+ deferred1.reject(1)
+ assertAsyncRan()
+})();
+
+(function() {
+ var deferred1 = Promise.defer()
+ var p1 = deferred1.promise
+ var deferred2 = Promise.defer()
+ var p2 = deferred2.promise
+ var deferred3 = Promise.defer()
+ var p3 = deferred3.promise
+ function* f() {
+ yield p1;
+ yield p2;
+ yield p3;
+ }
+ Promise.race(f()).chain(
+ assertUnreachable,
+ function(x) { assertAsync(x === 3, "race/iterable/reject/resolve") }
+ )
+ deferred3.reject(3)
+ deferred1.resolve(1)
+ assertAsyncRan()
+})();
+
+(function() {
+ 'use strict';
+ var getCalls = 0;
+ var funcCalls = 0;
+ var nextCalls = 0;
+ defineProperty(numberPrototype, symbolIterator, {
+ get: function() {
+ assertEquals('number', typeof this);
+ getCalls++;
+ return function() {
+ assertEquals('number', typeof this);
+ funcCalls++;
+ var n = this;
+ var i = 0;
+ return {
+ next() {
+ nextCalls++;
+ return {value: i++, done: i > n};
+ }
+ };
+ };
+ },
+ configurable: true
+ });
+
+ Promise.race(3).chain(
+ function(x) {
+ assertAsync(x === 0, "race/iterable/number");
+ },
+ assertUnreachable);
+ delete numberPrototype[symbolIterator];
+
+ assertEquals(1, getCalls);
+ assertEquals(1, funcCalls);
+ assertEquals(3 + 1, nextCalls); // + 1 for {done: true}
+ assertAsyncRan();
+})();
+
(function() {
var log
function MyPromise(resolver) {
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2243.js b/deps/v8/test/mjsunit/es6/regress/regress-2243.js
index e2411d241b..d5377cfd54 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2243.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2243.js
@@ -25,7 +25,5 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
-
assertThrows("'use strict'; (function f() { f = 123; })()", TypeError);
assertThrows("(function f() { 'use strict'; f = 123; })()", TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2322.js b/deps/v8/test/mjsunit/es6/regress/regress-2322.js
index 1195bab67c..6eca046397 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2322.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2322.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-scoping
-
"use strict";
assertThrows("'use strict'; for (let x in x);", ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-2506.js b/deps/v8/test/mjsunit/es6/regress/regress-2506.js
index 0eb2770e59..b5cc91dbd7 100644
--- a/deps/v8/test/mjsunit/regress/regress-2506.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2506.js
@@ -1,8 +1,6 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-scoping
'use strict';
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2858.js b/deps/v8/test/mjsunit/es6/regress/regress-2858.js
index 4ce9478497..3b54b17da0 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-2858.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-2858.js
@@ -1,8 +1,6 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-scoping
"use strict";
function f() {
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-3426.js b/deps/v8/test/mjsunit/es6/regress/regress-3426.js
index c3b11a1792..fd4903b1f8 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-3426.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-3426.js
@@ -2,6 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-scoping
-
assertThrows("(function() { 'use strict'; { let f; var f; } })", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-347906.js b/deps/v8/test/mjsunit/es6/regress/regress-347906.js
index c751618928..daa62f5df7 100644
--- a/deps/v8/test/mjsunit/regress/regress-347906.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-347906.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony
+// Flags: --allow-natives-syntax
function foo() {
return Math.clz32(12.34);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-3683.js b/deps/v8/test/mjsunit/es6/regress/regress-3683.js
index a00d82bd1f..2c18632719 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-3683.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-3683.js
@@ -1,8 +1,6 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-//
-// Flags: --harmony-scoping
"use strict";
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-3741.js b/deps/v8/test/mjsunit/es6/regress/regress-3741.js
index 8a9dd9e09c..0c5074a0f8 100644
--- a/deps/v8/test/mjsunit/harmony/regress/regress-3741.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-3741.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// Flags: --harmony-scoping --allow-natives-syntax
+// Flags: --allow-natives-syntax
'use strict';
function f24(deopt) {
let x = 1;
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-3938.js b/deps/v8/test/mjsunit/es6/regress/regress-3938.js
new file mode 100644
index 0000000000..bd7d1befa1
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-3938.js
@@ -0,0 +1,8 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+assertThrows(function() { for (const i = 0; ; i++) {} }, TypeError);
+assertThrows("'use strict'; for (const i = 0; ; i++) {}", TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-411237.js b/deps/v8/test/mjsunit/es6/regress/regress-411237.js
index 8b75ba3015..ece6481737 100644
--- a/deps/v8/test/mjsunit/regress/regress-411237.js
+++ b/deps/v8/test/mjsunit/es6/regress/regress-411237.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --harmony
+// Flags: --allow-natives-syntax
try {
%OptimizeFunctionOnNextCall(print);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-468661.js b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
new file mode 100644
index 0000000000..656190da0b
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-468661.js
@@ -0,0 +1,75 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+var exception = null;
+var break_count = 0;
+
+var expected_values = [ReferenceError, ReferenceError, 0, 0, 0, 0, 0, 1,
+ ReferenceError, ReferenceError];
+
+function listener(event, exec_state, event_data, data) {
+ try {
+ if (event == Debug.DebugEvent.Break) {
+ assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
+ // Count number of expected breakpoints in this source file.
+ if (!break_count) {
+ var source_text = exec_state.frame(0).func().script().source();
+ expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
+ print("Expected breaks: " + expected_breaks);
+ }
+ var frameMirror = exec_state.frame(0);
+
+ var v = null;
+ try {
+ v = frameMirror.evaluate('i').value();
+ } catch(e) {
+ v = e;
+ }
+ frameMirror.allScopes();
+ var source = frameMirror.sourceLineText();
+ print("paused at: " + source);
+ assertTrue(source.indexOf("// Break " + break_count + ".") > 0,
+ "Unexpected pause at: " + source + "\n" +
+ "Expected: // Break " + break_count + ".");
+ if (expected_values[break_count] === ReferenceError) {
+ assertTrue(v instanceof ReferenceError);
+ } else {
+ assertSame(expected_values[break_count], v);
+ }
+ ++break_count;
+
+ if (break_count !== expected_breaks) {
+ exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+ print("Next step prepared");
+ }
+ }
+ } catch(e) {
+ exception = e;
+ print(e, e.stack);
+ }
+}
+
+Debug.setListener(listener);
+
+var sum = 0;
+(function (){
+ 'use strict';
+
+ debugger; // Break 0.
+
+ for (let i=0; // Break 1.
+ i < 1; // Break 2. // Break 3. // Break 6. // Break 7.
+ i++) {
+ let key = i; // Break 4.
+ sum += key; // Break 5.
+ }
+}()); // Break 8.
+
+assertNull(exception); // Break 9.
+assertEquals(expected_breaks, break_count);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-474783.js b/deps/v8/test/mjsunit/es6/regress/regress-474783.js
new file mode 100644
index 0000000000..e258dcb49f
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-474783.js
@@ -0,0 +1,24 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+"use strict";
+class Base {
+}
+class Subclass extends Base {
+ constructor(a,b,c) {
+ arguments[1];
+ }
+}
+assertThrows(function() { Subclass(); }, TypeError);
+assertThrows(function() { Subclass(1); }, TypeError);
+assertThrows(function() { Subclass(1, 2); }, TypeError);
+assertThrows(function() { Subclass(1, 2, 3); }, TypeError);
+assertThrows(function() { Subclass(1, 2, 3, 4); }, TypeError);
+
+assertThrows(function() { Subclass.call(); }, TypeError);
+assertThrows(function() { Subclass.call({}); }, TypeError);
+assertThrows(function() { Subclass.call({}, 1); }, TypeError);
+assertThrows(function() { Subclass.call({}, 1, 2); }, TypeError);
+assertThrows(function() { Subclass.call({}, 1, 2, 3, 4); }, TypeError);
diff --git a/deps/v8/test/mjsunit/es6/set-minus-zero.js b/deps/v8/test/mjsunit/es6/set-minus-zero.js
new file mode 100644
index 0000000000..792332c648
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/set-minus-zero.js
@@ -0,0 +1,51 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+var set = new Set();
+
+var objectKey = {};
+var stringKey = 'keykeykey';
+var numberKey = 42.24;
+var booleanKey = true;
+var undefinedKey = undefined;
+var nullKey = null;
+var nanKey = NaN;
+var zeroKey = 0;
+var minusZeroKey = -0;
+
+assertEquals(0, set.size);
+
+set.add(objectKey);
+set.add(stringKey);
+set.add(numberKey);
+set.add(booleanKey);
+set.add(undefinedKey);
+set.add(nullKey);
+set.add(nanKey);
+set.add(zeroKey);
+
+assertEquals(8, set.size);
+
+assertTrue(set.has(objectKey));
+assertTrue(set.has(stringKey));
+assertTrue(set.has(numberKey));
+assertTrue(set.has(booleanKey));
+assertTrue(set.has(undefinedKey));
+assertTrue(set.has(nullKey));
+assertTrue(set.has(nanKey));
+assertTrue(set.has(zeroKey));
+
+assertFalse(set.has({}));
+assertTrue(set.has('keykeykey'));
+assertTrue(set.has(42.24));
+assertTrue(set.has(true));
+assertTrue(set.has(undefined));
+assertTrue(set.has(null));
+assertTrue(set.has(NaN));
+assertTrue(set.has(0));
+assertTrue(set.has(-0));
+assertTrue(set.has(1 / Infinity));
+assertTrue(set.has(-1 / Infinity));
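+
+// Note: Set membership also uses SameValueZero, matching the Map
+// behaviour in map-minus-zero.js: -0 is normalized to +0 and NaN
+// matches itself.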
diff --git a/deps/v8/test/mjsunit/harmony/string-codepointat.js b/deps/v8/test/mjsunit/es6/string-codepointat.js
index 411b0f23c7..976d5be6af 100644
--- a/deps/v8/test/mjsunit/harmony/string-codepointat.js
+++ b/deps/v8/test/mjsunit/es6/string-codepointat.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-strings
-
// Tests taken from:
// https://github.com/mathiasbynens/String.prototype.codePointAt
diff --git a/deps/v8/test/mjsunit/harmony/string-endswith.js b/deps/v8/test/mjsunit/es6/string-endswith.js
index cc76b5fe4e..cbf2ed8766 100644
--- a/deps/v8/test/mjsunit/harmony/string-endswith.js
+++ b/deps/v8/test/mjsunit/es6/string-endswith.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-strings
-
assertEquals(1, String.prototype.endsWith.length);
var testString = "Hello World";
diff --git a/deps/v8/test/mjsunit/harmony/string-fromcodepoint.js b/deps/v8/test/mjsunit/es6/string-fromcodepoint.js
index 97ecf0eec5..c4634ace44 100644
--- a/deps/v8/test/mjsunit/harmony/string-fromcodepoint.js
+++ b/deps/v8/test/mjsunit/es6/string-fromcodepoint.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-strings
-
// Tests taken from:
// https://github.com/mathiasbynens/String.fromCodePoint
diff --git a/deps/v8/test/mjsunit/harmony/string-includes.js b/deps/v8/test/mjsunit/es6/string-includes.js
index 33ed8ea44c..61bf779483 100644
--- a/deps/v8/test/mjsunit/harmony/string-includes.js
+++ b/deps/v8/test/mjsunit/es6/string-includes.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-strings
-
assertEquals(1, String.prototype.includes.length);
var reString = "asdf[a-z]+(asdf)?";
diff --git a/deps/v8/test/mjsunit/harmony/string-raw.js b/deps/v8/test/mjsunit/es6/string-raw.js
index 28e2af9164..2c6bb2ff30 100644
--- a/deps/v8/test/mjsunit/harmony/string-raw.js
+++ b/deps/v8/test/mjsunit/es6/string-raw.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-templates
-
(function testStringRawArity() {
assertEquals(1, String.raw.length);
})();
@@ -256,3 +254,39 @@
assertEquals("12345", String.raw(callSiteObj, arg(2), arg(4), arg(6)));
assertEquals(["length", "raw1", "arg2", "raw3", "arg4", "raw5"], order);
})();
+
+
+(function testStringRawToStringSubstitutionsOrder() {
+ var subs = [];
+ var log = [];
+ function stringify(toString) {
+ var valueOf = "_" + toString + "_";
+ return {
+ toString: function() { return toString; },
+ valueOf: function() { return valueOf; }
+ };
+ }
+ function getter(name, value) {
+ return {
+ get: function() {
+ log.push("get" + name);
+ return value;
+ },
+ set: function(v) {
+ log.push("set" + name);
+ }
+ };
+ }
+ Object.defineProperties(subs, {
+ 0: getter(0, stringify("a")),
+ 1: getter(1, stringify("b")),
+ 2: getter(2, stringify("c"))
+ });
+
+ assertEquals("-a-b-c-", String.raw`-${subs[0]}-${subs[1]}-${subs[2]}-`);
+ assertArrayEquals(["get0", "get1", "get2"], log);
+
+ log.length = 0;
+ assertEquals("-a-", String.raw`-${subs[0]}-`);
+ assertArrayEquals(["get0"], log);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/string-repeat.js b/deps/v8/test/mjsunit/es6/string-repeat.js
index 0af74483a0..15caea14f3 100644
--- a/deps/v8/test/mjsunit/harmony/string-repeat.js
+++ b/deps/v8/test/mjsunit/es6/string-repeat.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-strings
-
assertEquals("000", String.prototype.repeat.call(0, 3));
assertEquals("-1-1-1", String.prototype.repeat.call(-1, 3));
assertEquals("2.12.12.1", String.prototype.repeat.call(2.1, 3));
diff --git a/deps/v8/test/mjsunit/harmony/string-startswith.js b/deps/v8/test/mjsunit/es6/string-startswith.js
index d72f2946f5..887db994a0 100644
--- a/deps/v8/test/mjsunit/harmony/string-startswith.js
+++ b/deps/v8/test/mjsunit/es6/string-startswith.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-strings
-
assertEquals(1, String.prototype.startsWith.length);
var testString = "Hello World";
diff --git a/deps/v8/test/mjsunit/es6/symbols.js b/deps/v8/test/mjsunit/es6/symbols.js
index b9811f509e..46c3daba8a 100644
--- a/deps/v8/test/mjsunit/es6/symbols.js
+++ b/deps/v8/test/mjsunit/es6/symbols.js
@@ -245,25 +245,20 @@ TestCall()
function TestCollections() {
var set = new Set
var map = new Map
- var weakmap = new WeakMap
for (var i in symbols) {
set.add(symbols[i])
map.set(symbols[i], i)
- weakmap.set(symbols[i], i)
}
assertEquals(symbols.length, set.size)
assertEquals(symbols.length, map.size)
for (var i in symbols) {
assertTrue(set.has(symbols[i]))
assertTrue(map.has(symbols[i]))
- assertTrue(weakmap.has(symbols[i]))
assertEquals(i, map.get(symbols[i]))
- assertEquals(i, weakmap.get(symbols[i]))
}
for (var i in symbols) {
assertTrue(set.delete(symbols[i]))
assertTrue(map.delete(symbols[i]))
- assertTrue(weakmap.delete(symbols[i]))
}
assertEquals(0, set.size)
assertEquals(0, map.size)
diff --git a/deps/v8/test/mjsunit/harmony/templates.js b/deps/v8/test/mjsunit/es6/templates.js
index a884f58fb6..15296e8722 100644
--- a/deps/v8/test/mjsunit/harmony/templates.js
+++ b/deps/v8/test/mjsunit/es6/templates.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-templates --harmony-unicode
+// Flags: --harmony-unicode
var num = 5;
var str = "str";
@@ -423,10 +423,12 @@ var obj = {
Object.defineProperty(Array.prototype, 0, {
set: function() {
assertUnreachable();
- }
+ },
+ configurable: true
});
function tag(){}
tag`a${1}b`;
+ delete Array.prototype[0];
})();
@@ -518,3 +520,162 @@ var obj = {
assertThrows("`${(function() { \"use strict\"; return \"\\07\"; })()}`",
SyntaxError);
})();
+
+
+var global = this;
+(function testCallNew() {
+ "use strict";
+ var called = false;
+ var calledWith;
+ global.log = function(x) { called = true; calledWith = x; };
+
+ assertInstanceof(new Function`log("test")`, Object);
+ assertTrue(called);
+ assertSame("test", calledWith);
+ delete global.log;
+})();
+
+
+(function testCallNew2() {
+ "use strict";
+ var log = [];
+ function tag(x) {
+ log.push(x);
+ if (!(this instanceof tag)) {
+ return tag;
+ }
+ this.x = x === void 0 ? null : x;
+ return this;
+ }
+ // No arguments passed to constructor
+ var instance = new tag`x``y``z`;
+ assertInstanceof(instance, tag);
+ assertSame(tag.prototype, Object.getPrototypeOf(instance));
+ assertEquals({ x: null }, instance);
+ assertEquals([["x"], ["y"], ["z"], undefined], log);
+
+ // Arguments passed to constructor
+ log.length = 0;
+ instance = new tag`x2` `y2` `z2` (`test`);
+ assertInstanceof(instance, tag);
+ assertSame(tag.prototype, Object.getPrototypeOf(instance));
+ assertEquals({ x: "test" }, instance);
+ assertEquals([["x2"], ["y2"], ["z2"], "test"], log);
+})();
+
+
+(function testCallResultOfTagFn() {
+ "use strict";
+ var i = 0;
+ var raw = [];
+ function tag(cs) {
+ var args = Array.prototype.slice.call(arguments);
+ var text = String.raw.apply(null, args);
+ if (i++ < 2) {
+ raw.push("tag;" + text);
+ return tag;
+ }
+
+ raw.push("raw;" + text);
+ return text;
+ }
+ assertEquals("test3", tag`test1``test2``test3`);
+ assertEquals([
+ "tag;test1",
+ "tag;test2",
+ "raw;test3"
+ ], raw);
+})();
+
+
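+// Note: template substitutions are converted with ToString, so toString
+// wins over valueOf; only the arithmetic in `${a + b}` and `${a + ""}`
+// goes through ToPrimitive (valueOf) before the template's own
+// conversion.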
+(function testToStringSubstitutions() {
+ var a = {
+ toString: function() { return "a"; },
+ valueOf: function() { return "-a-"; }
+ };
+ var b = {
+ toString: function() { return "b"; },
+ valueOf: function() { return "-b-"; }
+ };
+ assertEquals("a", `${a}`);
+ assertEquals("ab", `${a}${b}`);
+ assertEquals("-a--b-", `${a + b}`);
+ assertEquals("-a-", `${a + ""}`);
+ assertEquals("1a", `1${a}`);
+ assertEquals("1a2", `1${a}2`);
+ assertEquals("1a2b", `1${a}2${b}`);
+ assertEquals("1a2b3", `1${a}2${b}3`);
+})();
+
+
+(function testToStringSubstitutionsOrder() {
+ var subs = [];
+ var log = [];
+ function getter(name, value) {
+ return {
+ get: function() {
+ log.push("get" + name);
+ return value;
+ },
+ set: function(v) {
+ log.push("set" + name);
+ }
+ };
+ }
+ Object.defineProperties(subs, {
+ 0: getter(0, "a"),
+ 1: getter(1, "b"),
+ 2: getter(2, "c")
+ });
+
+ assertEquals("-a-b-c-", `-${subs[0]}-${subs[1]}-${subs[2]}-`);
+ assertArrayEquals(["get0", "get1", "get2"], log);
+})();
+
+
+(function testTaggedToStringSubstitutionsOrder() {
+ var subs = [];
+ var log = [];
+ var tagged = [];
+ function getter(name, value) {
+ return {
+ get: function() {
+ log.push("get" + name);
+ return value;
+ },
+ set: function(v) {
+ log.push("set" + name);
+ }
+ };
+ }
+ Object.defineProperties(subs, {
+ 0: getter(0, 1),
+ 1: getter(1, 2),
+ 2: getter(2, 3)
+ });
+
+ function tag(cs) {
+ var n_substitutions = arguments.length - 1;
+ var n_cooked = cs.length;
+ var e = cs[0];
+ var i = 0;
+ assertEquals(n_cooked, n_substitutions + 1);
+ while (i < n_substitutions) {
+ var sub = arguments[i++ + 1];
+ var tail = cs[i];
+ tagged.push(sub);
+ e = e.concat(sub, tail);
+ }
+ return e;
+ }
+
+ assertEquals("-1-2-3-", tag`-${subs[0]}-${subs[1]}-${subs[2]}-`);
+ assertArrayEquals(["get0", "get1", "get2"], log);
+ assertArrayEquals([1, 2, 3], tagged);
+
+ tagged.length = 0;
+ log.length = 0;
+ assertEquals("-1-", tag`-${subs[0]}-`);
+ assertArrayEquals(["get0"], log);
+ assertArrayEquals([1], tagged);
+})();
diff --git a/deps/v8/test/mjsunit/es7/object-observe.js b/deps/v8/test/mjsunit/es7/object-observe.js
index 5af205eadf..b2853c4048 100644
--- a/deps/v8/test/mjsunit/es7/object-observe.js
+++ b/deps/v8/test/mjsunit/es7/object-observe.js
@@ -1142,7 +1142,9 @@ var properties = ["a", "1", 1, "length", "setPrototype", "name", "caller"];
function blacklisted(obj, prop) {
return (obj instanceof Int32Array && prop == 1) ||
(obj instanceof Int32Array && prop === "length") ||
- (obj instanceof ArrayBuffer && prop == 1)
+ (obj instanceof ArrayBuffer && prop == 1) ||
+ (obj instanceof Function && prop === "name") || // Has its own test.
+ (obj instanceof Function && prop === "length"); // Has its own test.
}
for (var i in objects) for (var j in properties) {
@@ -1798,3 +1800,66 @@ for (var b1 = 0; b1 < 2; ++b1)
for (var n = 0; n < 3; ++n)
for (var i in mutationByIncr)
TestFastElementsLength(mutationByIncr[i], b1 != 0, b2 != 0, 7*n, 7*n+1);
+
+
+(function TestFunctionName() {
+ reset();
+
+ function fun() {}
+ Object.observe(fun, observer.callback);
+ fun.name = 'x'; // No change. Not writable.
+ Object.defineProperty(fun, 'name', {value: 'a'});
+ Object.defineProperty(fun, 'name', {writable: true});
+ fun.name = 'b';
+ delete fun.name;
+ fun.name = 'x'; // No change. Function.prototype.name is non-writable.
+ Object.defineProperty(Function.prototype, 'name', {writable: true});
+ fun.name = 'c';
+ fun.name = 'c'; // Same, no update.
+ Object.deliverChangeRecords(observer.callback);
+ observer.assertCallbackRecords([
+ { object: fun, type: 'update', name: 'name', oldValue: 'fun' },
+ { object: fun, type: 'reconfigure', name: 'name'},
+ { object: fun, type: 'update', name: 'name', oldValue: 'a' },
+ { object: fun, type: 'delete', name: 'name', oldValue: 'b' },
+ { object: fun, type: 'add', name: 'name' },
+ ]);
+})();
+
+
+(function TestFunctionLength() {
+ reset();
+
+ function fun(x) {}
+ Object.observe(fun, observer.callback);
+ fun.length = 'x'; // No change. Not writable.
+ Object.defineProperty(fun, 'length', {value: 'a'});
+ Object.defineProperty(fun, 'length', {writable: true});
+ fun.length = 'b';
+ delete fun.length;
+ fun.length = 'x'; // No change. Function.prototype.length is non-writable.
+ Object.defineProperty(Function.prototype, 'length', {writable: true});
+ fun.length = 'c';
+ fun.length = 'c'; // Same, no update.
+ Object.deliverChangeRecords(observer.callback);
+ observer.assertCallbackRecords([
+ { object: fun, type: 'update', name: 'length', oldValue: 1 },
+ { object: fun, type: 'reconfigure', name: 'length'},
+ { object: fun, type: 'update', name: 'length', oldValue: 'a' },
+ { object: fun, type: 'delete', name: 'length', oldValue: 'b' },
+ { object: fun, type: 'add', name: 'length' },
+ ]);
+})();
+
+
+(function TestObserveInvalidAcceptMessage() {
+ var ex;
+ try {
+ Object.observe({}, function(){}, "not an object");
+ } catch (e) {
+ ex = e;
+ }
+ assertInstanceof(ex, TypeError);
+ assertEquals("Third argument to Object.observe must be an array of strings.",
+ ex.message);
+})();
diff --git a/deps/v8/test/mjsunit/function-length-accessor.js b/deps/v8/test/mjsunit/function-length-accessor.js
index 97c9f65822..386ac99643 100644
--- a/deps/v8/test/mjsunit/function-length-accessor.js
+++ b/deps/v8/test/mjsunit/function-length-accessor.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-scoping --lazy
+// Flags: --lazy
function foo(a, b, c, d) {
"use strict"
diff --git a/deps/v8/test/mjsunit/function-prototype.js b/deps/v8/test/mjsunit/function-prototype.js
index 7eac6df121..76ab53cf07 100644
--- a/deps/v8/test/mjsunit/function-prototype.js
+++ b/deps/v8/test/mjsunit/function-prototype.js
@@ -34,6 +34,9 @@ function TestNonObjectPrototype(value) {
var f = new F();
assertEquals(value, F.prototype);
assertEquals(Object.prototype, f.__proto__);
+ // Test that map transitions don't break anything.
+ F.property = "value";
+ assertEquals(value, F.prototype);
}
var values = [123, "asdf", true];
diff --git a/deps/v8/test/mjsunit/harmony/block-const-assign.js b/deps/v8/test/mjsunit/harmony/block-const-assign.js
deleted file mode 100644
index c21a0a3480..0000000000
--- a/deps/v8/test/mjsunit/harmony/block-const-assign.js
+++ /dev/null
@@ -1,137 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-scoping
-
-// Test that we throw early syntax errors in harmony mode
-// when using an immutable binding in an assigment or with
-// prefix/postfix decrement/increment operators.
-
-"use strict";
-
-// Function local const.
-function constDecl0(use) {
- return "(function() { const constvar = 1; " + use + "; })();";
-}
-
-
-function constDecl1(use) {
- return "(function() { " + use + "; const constvar = 1; })();";
-}
-
-
-// Function local const, assign from eval.
-function constDecl2(use) {
- use = "eval('(function() { " + use + " })')()";
- return "(function() { const constvar = 1; " + use + "; })();";
-}
-
-
-function constDecl3(use) {
- use = "eval('(function() { " + use + " })')()";
- return "(function() { " + use + "; const constvar = 1; })();";
-}
-
-
-// Block local const.
-function constDecl4(use) {
- return "(function() { { const constvar = 1; " + use + "; } })();";
-}
-
-
-function constDecl5(use) {
- return "(function() { { " + use + "; const constvar = 1; } })();";
-}
-
-
-// Block local const, assign from eval.
-function constDecl6(use) {
- use = "eval('(function() {" + use + "})')()";
- return "(function() { { const constvar = 1; " + use + "; } })();";
-}
-
-
-function constDecl7(use) {
- use = "eval('(function() {" + use + "})')()";
- return "(function() { { " + use + "; const constvar = 1; } })();";
-}
-
-
-// Function expression name.
-function constDecl8(use) {
- return "(function constvar() { " + use + "; })();";
-}
-
-
-// Function expression name, assign from eval.
-function constDecl9(use) {
- use = "eval('(function(){" + use + "})')()";
- return "(function constvar() { " + use + "; })();";
-}
-
-let decls = [ constDecl0,
- constDecl1,
- constDecl2,
- constDecl3,
- constDecl4,
- constDecl5,
- constDecl6,
- constDecl7,
- constDecl8,
- constDecl9
- ];
-let declsForTDZ = new Set([constDecl1, constDecl3, constDecl5, constDecl7]);
-let uses = [ 'constvar = 1;',
- 'constvar += 1;',
- '++constvar;',
- 'constvar++;'
- ];
-
-function Test(d,u) {
- 'use strict';
- try {
- print(d(u));
- eval(d(u));
- } catch (e) {
- if (declsForTDZ.has(d) && u !== uses[0]) {
- // In these cases, read of a const variable occurs
- // before a write to it, so TDZ kicks in before const check.
- assertInstanceof(e, ReferenceError);
- return;
- }
- assertInstanceof(e, TypeError);
- assertTrue(e.toString().indexOf("Assignment to constant variable") >= 0);
- return;
- }
- assertUnreachable();
-}
-
-for (var d = 0; d < decls.length; ++d) {
- for (var u = 0; u < uses.length; ++u) {
- Test(decls[d], uses[u]);
- }
-}
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js b/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js
index 4e50f8a461..ab5d39e867 100644
--- a/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js
+++ b/deps/v8/test/mjsunit/harmony/computed-property-names-classes.js
@@ -312,41 +312,74 @@ function assertIteratorResult(value, done, result) {
(function TestPrototype() {
- // Normally a static prototype property is not allowed.
- class C {
- static ['prototype']() {
- return 1;
+ assertThrows(function() {
+ class C {
+ static ['prototype']() {
+ return 1;
+ }
}
- }
- assertEquals(1, C.prototype());
+ }, TypeError);
- class C2 {
- static get ['prototype']() {
- return 2;
+ assertThrows(function() {
+ class C2 {
+ static get ['prototype']() {
+ return 2;
+ }
}
- }
- assertEquals(2, C2.prototype);
+ }, TypeError);
- var calls = 0;
- class C3 {
- static set ['prototype'](x) {
- assertEquals(3, x);
- calls++;
+ assertThrows(function() {
+ class C3 {
+ static set ['prototype'](x) {
+ assertEquals(3, x);
+ }
}
- }
- C3.prototype = 3;
- assertEquals(1, calls);
+ }, TypeError);
+
+ assertThrows(function() {
+ class C4 {
+ static *['prototype']() {
+ yield 1;
+ yield 2;
+ }
+ }
+ }, TypeError);
+})();
- class C4 {
- static *['prototype']() {
- yield 1;
- yield 2;
+
+(function TestPrototypeConcat() {
+ assertThrows(function() {
+ class C {
+ static ['pro' + 'tot' + 'ype']() {
+ return 1;
+ }
}
- }
- var iter = C4.prototype();
- assertIteratorResult(1, false, iter.next());
- assertIteratorResult(2, false, iter.next());
- assertIteratorResult(undefined, true, iter.next());
+ }, TypeError);
+
+ assertThrows(function() {
+ class C2 {
+ static get ['pro' + 'tot' + 'ype']() {
+ return 2;
+ }
+ }
+ }, TypeError);
+
+ assertThrows(function() {
+ class C3 {
+ static set ['pro' + 'tot' + 'ype'](x) {
+ assertEquals(3, x);
+ }
+ }
+ }, TypeError);
+
+ assertThrows(function() {
+ class C4 {
+ static *['pro' + 'tot' + 'ype']() {
+ yield 1;
+ yield 2;
+ }
+ }
+ }, TypeError);
})();
@@ -388,3 +421,45 @@ function assertIteratorResult(value, done, result) {
assertIteratorResult(2, false, iter.next());
assertIteratorResult(undefined, true, iter.next());
})();
+
+
+(function TestExceptionInName() {
+ function MyError() {}
+ function throwMyError() {
+ throw new MyError();
+ }
+ assertThrows(function() {
+ class C {
+ [throwMyError()]() {}
+ }
+ }, MyError);
+ assertThrows(function() {
+ class C {
+ get [throwMyError()]() { return 42; }
+ }
+ }, MyError);
+ assertThrows(function() {
+ class C {
+ set [throwMyError()](_) { }
+ }
+ }, MyError);
+})();
+
+
+(function TestTdzName() {
+ assertThrows(function() {
+ class C {
+ [C]() {}
+ }
+ }, ReferenceError);
+ assertThrows(function() {
+ class C {
+ get [C]() { return 42; }
+ }
+ }, ReferenceError);
+ assertThrows(function() {
+ class C {
+ set [C](_) { }
+ }
+ }, ReferenceError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/computed-property-names.js b/deps/v8/test/mjsunit/harmony/computed-property-names.js
index 69360771c1..36e1411169 100644
--- a/deps/v8/test/mjsunit/harmony/computed-property-names.js
+++ b/deps/v8/test/mjsunit/harmony/computed-property-names.js
@@ -277,3 +277,26 @@ function ID(x) {
assertEquals('X', object.x);
assertEquals(proto, Object.getPrototypeOf(object));
})();
+
+
+(function TestExceptionInName() {
+ function MyError() {}
+ function throwMyError() {
+ throw new MyError();
+ }
+ assertThrows(function() {
+ var o = {
+ [throwMyError()]: 42
+ };
+ }, MyError);
+ assertThrows(function() {
+ var o = {
+ get [throwMyError()]() { return 42; }
+ };
+ }, MyError);
+ assertThrows(function() {
+ var o = {
+ set [throwMyError()](_) { }
+ };
+ }, MyError);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/module-linking.js b/deps/v8/test/mjsunit/harmony/module-linking.js
index 3a5bc89793..faaf7f2e49 100644
--- a/deps/v8/test/mjsunit/harmony/module-linking.js
+++ b/deps/v8/test/mjsunit/harmony/module-linking.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-modules --harmony-scoping
+// Flags: --harmony-modules
// Test basic module linking and initialization.
diff --git a/deps/v8/test/mjsunit/harmony/module-resolution.js b/deps/v8/test/mjsunit/harmony/module-resolution.js
index 1a95347d14..7f1e431313 100644
--- a/deps/v8/test/mjsunit/harmony/module-resolution.js
+++ b/deps/v8/test/mjsunit/harmony/module-resolution.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-modules --harmony-scoping
+// Flags: --harmony-modules
// Test basic module interface inference.
diff --git a/deps/v8/test/mjsunit/harmony/private.js b/deps/v8/test/mjsunit/harmony/private.js
index 218094c3d5..c08daf1050 100644
--- a/deps/v8/test/mjsunit/harmony/private.js
+++ b/deps/v8/test/mjsunit/harmony/private.js
@@ -196,25 +196,20 @@ TestSet()
function TestCollections() {
var set = new Set
var map = new Map
- var weakmap = new WeakMap
for (var i in symbols) {
set.add(symbols[i])
map.set(symbols[i], i)
- weakmap.set(symbols[i], i)
}
assertEquals(symbols.length, set.size)
assertEquals(symbols.length, map.size)
for (var i in symbols) {
assertTrue(set.has(symbols[i]))
assertTrue(map.has(symbols[i]))
- assertTrue(weakmap.has(symbols[i]))
assertEquals(i, map.get(symbols[i]))
- assertEquals(i, weakmap.get(symbols[i]))
}
for (var i in symbols) {
assertTrue(set.delete(symbols[i]))
assertTrue(map.delete(symbols[i]))
- assertTrue(weakmap.delete(symbols[i]))
}
assertEquals(0, set.size)
assertEquals(0, map.size)
diff --git a/deps/v8/test/mjsunit/harmony/reflect-apply.js b/deps/v8/test/mjsunit/harmony/reflect-apply.js
new file mode 100644
index 0000000000..2cfb98282b
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/reflect-apply.js
@@ -0,0 +1,212 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-reflect
+
+
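+// Note: Reflect.apply(target, thisArg, argumentsList) invokes target
+// with the given receiver and an array-like argument list. Unlike
+// Function.prototype.apply it carries no [[Construct]], which the
+// non-constructor test below relies on.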
+(function testReflectApplyArity() {
+ assertEquals(3, Reflect.apply.length);
+})();
+
+
+(function testReflectApplyNonConstructor() {
+ assertThrows(function() {
+ new Reflect.apply(function(){}, null, []);
+ }, TypeError);
+})();
+
+
+(function testAppliedReceiverSloppy() {
+ function returnThis() { return this; }
+ var receiver = {};
+
+ assertSame(this, Reflect.apply(returnThis, void 0, []));
+ assertSame(this, Reflect.apply(returnThis, null, []));
+ assertSame(this, Reflect.apply(returnThis, this, []));
+ assertSame(receiver, Reflect.apply(returnThis, receiver, []));
+
+ // Wrap JS values
+ assertSame(String.prototype,
+ Object.getPrototypeOf(Reflect.apply(returnThis, "str", [])));
+ assertSame(Number.prototype,
+ Object.getPrototypeOf(Reflect.apply(returnThis, 123, [])));
+ assertSame(Boolean.prototype,
+ Object.getPrototypeOf(Reflect.apply(returnThis, true, [])));
+ assertSame(Symbol.prototype,
+ Object.getPrototypeOf(
+ Reflect.apply(returnThis, Symbol("test"), [])));
+})();
+
+
+(function testAppliedReceiverStrict() {
+ function returnThis() { 'use strict'; return this; }
+ var receiver = {};
+
+ assertSame(void 0, Reflect.apply(returnThis, void 0, []));
+ assertSame(this, Reflect.apply(returnThis, this, []));
+ assertSame(receiver, Reflect.apply(returnThis, receiver, []));
+
+ // Don't wrap value types
+ var regexp = /123/;
+ var symbol = Symbol("test");
+ assertSame("str", Reflect.apply(returnThis, "str", []));
+ assertSame(123, Reflect.apply(returnThis, 123, []));
+ assertSame(true, Reflect.apply(returnThis, true, []));
+ assertSame(regexp, Reflect.apply(returnThis, regexp, []));
+ assertSame(symbol, Reflect.apply(returnThis, symbol, []));
+})();
+
+
+(function testAppliedArgumentsLength() {
+ function returnLengthStrict() { 'use strict'; return arguments.length; }
+ function returnLengthSloppy() { return arguments.length; }
+
+ assertEquals(0, Reflect.apply(returnLengthStrict, this, []));
+ assertEquals(0, Reflect.apply(returnLengthSloppy, this, []));
+ assertEquals(0, Reflect.apply(returnLengthStrict, this, {}));
+ assertEquals(0, Reflect.apply(returnLengthSloppy, this, {}));
+
+ for (var i = 0; i < 256; ++i) {
+ assertEquals(i, Reflect.apply(returnLengthStrict, this, new Array(i)));
+ assertEquals(i, Reflect.apply(returnLengthSloppy, this, new Array(i)));
+ assertEquals(i, Reflect.apply(returnLengthStrict, this, { length: i }));
+ assertEquals(i, Reflect.apply(returnLengthSloppy, this, { length: i }));
+ }
+})();
+
+
+(function testAppliedArgumentsLengthThrows() {
+ function noopStrict() { 'use strict'; }
+ function noopSloppy() { }
+ function MyError() {}
+
+ var argsList = {};
+ Object.defineProperty(argsList, "length", {
+ get: function() { throw new MyError(); }
+ });
+
+ assertThrows(function() {
+ Reflect.apply(noopStrict, this, argsList);
+ }, MyError);
+
+ assertThrows(function() {
+ Reflect.apply(noopSloppy, this, argsList);
+ }, MyError);
+})();
+
+
+(function testAppliedArgumentsElementThrows() {
+ function noopStrict() { 'use strict'; }
+ function noopSloppy() { }
+ function MyError() {}
+
+ var argsList = { length: 1 };
+ Object.defineProperty(argsList, "0", {
+ get: function() { throw new MyError(); }
+ });
+
+ assertThrows(function() {
+ Reflect.apply(noopStrict, this, argsList);
+ }, MyError);
+
+ assertThrows(function() {
+ Reflect.apply(noopSloppy, this, argsList);
+ }, MyError);
+})();
+
+
+(function testAppliedNonFunctionStrict() {
+ 'use strict';
+ assertThrows(function() { Reflect.apply(void 0); }, TypeError);
+ assertThrows(function() { Reflect.apply(null); }, TypeError);
+ assertThrows(function() { Reflect.apply(123); }, TypeError);
+ assertThrows(function() { Reflect.apply("str"); }, TypeError);
+ assertThrows(function() { Reflect.apply(Symbol("x")); }, TypeError);
+ assertThrows(function() { Reflect.apply(/123/); }, TypeError);
+ assertThrows(function() { Reflect.apply(NaN); }, TypeError);
+ assertThrows(function() { Reflect.apply({}); }, TypeError);
+ assertThrows(function() { Reflect.apply([]); }, TypeError);
+})();
+
+
+(function testAppliedNonFunctionSloppy() {
+ assertThrows(function() { Reflect.apply(void 0); }, TypeError);
+ assertThrows(function() { Reflect.apply(null); }, TypeError);
+ assertThrows(function() { Reflect.apply(123); }, TypeError);
+ assertThrows(function() { Reflect.apply("str"); }, TypeError);
+ assertThrows(function() { Reflect.apply(Symbol("x")); }, TypeError);
+ assertThrows(function() { Reflect.apply(/123/); }, TypeError);
+ assertThrows(function() { Reflect.apply(NaN); }, TypeError);
+ assertThrows(function() { Reflect.apply({}); }, TypeError);
+ assertThrows(function() { Reflect.apply([]); }, TypeError);
+})();
+
+
+(function testAppliedArgumentsNonList() {
+ function noopStrict() { 'use strict'; }
+ function noopSloppy() {}
+ var R = void 0;
+ assertThrows(function() { Reflect.apply(noopStrict, R, null); }, TypeError);
+ assertThrows(function() { Reflect.apply(noopSloppy, R, null); }, TypeError);
+ assertThrows(function() { Reflect.apply(noopStrict, R, 1); }, TypeError);
+ assertThrows(function() { Reflect.apply(noopSloppy, R, 1); }, TypeError);
+ assertThrows(function() { Reflect.apply(noopStrict, R, "BAD"); }, TypeError);
+ assertThrows(function() { Reflect.apply(noopSloppy, R, "BAD"); }, TypeError);
+ assertThrows(function() { Reflect.apply(noopStrict, R, true); }, TypeError);
+ assertThrows(function() { Reflect.apply(noopSloppy, R, true); }, TypeError);
+ var sym = Symbol("x");
+ assertThrows(function() { Reflect.apply(noopStrict, R, sym); }, TypeError);
+ assertThrows(function() { Reflect.apply(noopSloppy, R, sym); }, TypeError);
+})();
+
+
+(function testAppliedArgumentValue() {
+ function returnFirstStrict(a) { 'use strict'; return a; }
+ function returnFirstSloppy(a) { return a; }
+ function returnLastStrict(a) {
+ 'use strict'; return arguments[arguments.length - 1]; }
+ function returnLastSloppy(a) { return arguments[arguments.length - 1]; }
+ function returnSumStrict() {
+ 'use strict';
+ var sum = arguments[0];
+ for (var i = 1; i < arguments.length; ++i) {
+ sum += arguments[i];
+ }
+ return sum;
+ }
+ function returnSumSloppy() {
+ var sum = arguments[0];
+ for (var i = 1; i < arguments.length; ++i) {
+ sum += arguments[i];
+ }
+ return sum;
+ }
+
+ assertEquals("OK!", Reflect.apply(returnFirstStrict, this, ["OK!"]));
+ assertEquals("OK!", Reflect.apply(returnFirstSloppy, this, ["OK!"]));
+ assertEquals("OK!", Reflect.apply(returnFirstStrict, this,
+ { 0: "OK!", length: 1 }));
+ assertEquals("OK!", Reflect.apply(returnFirstSloppy, this,
+ { 0: "OK!", length: 1 }));
+ assertEquals("OK!", Reflect.apply(returnLastStrict, this,
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, "OK!"]));
+ assertEquals("OK!", Reflect.apply(returnLastSloppy, this,
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, "OK!"]));
+ assertEquals("OK!", Reflect.apply(returnLastStrict, this,
+ { 9: "OK!", length: 10 }));
+ assertEquals("OK!", Reflect.apply(returnLastSloppy, this,
+ { 9: "OK!", length: 10 }));
+ assertEquals("TEST", Reflect.apply(returnSumStrict, this,
+ ["T", "E", "S", "T"]));
+ assertEquals("TEST!!", Reflect.apply(returnSumStrict, this,
+ ["T", "E", "S", "T", "!", "!"]));
+ assertEquals(10, Reflect.apply(returnSumStrict, this,
+ { 0: 1, 1: 2, 2: 3, 3: 4, length: 4 }));
+ assertEquals("TEST", Reflect.apply(returnSumSloppy, this,
+ ["T", "E", "S", "T"]));
+ assertEquals("TEST!!", Reflect.apply(returnSumSloppy, this,
+ ["T", "E", "S", "T", "!", "!"]));
+ assertEquals(10, Reflect.apply(returnSumSloppy, this,
+ { 0: 1, 1: 2, 2: 3, 3: 4, length: 4 }));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/reflect-construct.js b/deps/v8/test/mjsunit/harmony/reflect-construct.js
new file mode 100644
index 0000000000..2211e3f783
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/reflect-construct.js
@@ -0,0 +1,277 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-reflect
+
+
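+// Note: Reflect.construct(target, argumentsList[, newTarget]) runs
+// target's [[Construct]]; when newTarget differs from target, the new
+// object is meant to take its prototype from newTarget.prototype (see
+// the newTarget tests and the TODO about a V8 bug further down).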
+(function testReflectConstructArity() {
+ assertEquals(2, Reflect.construct.length);
+})();
+
+
+(function testReflectConstructNonConstructor() {
+ assertThrows(function() {
+ new Reflect.construct(function(){}, []);
+ }, TypeError);
+})();
+
+
+(function testReflectConstructBasic() {
+ function Constructor() { "use strict"; }
+ assertInstanceof(Reflect.construct(Constructor, []), Constructor);
+})();
+
+
+(function testReflectConstructBasicSloppy() {
+ function Constructor() {}
+ assertInstanceof(Reflect.construct(Constructor, []), Constructor);
+})();
+
+
+(function testReflectConstructReturnSomethingElseStrict() {
+ var R = {};
+ function Constructor() { "use strict"; return R; }
+ assertSame(R, Reflect.construct(Constructor, []));
+})();
+
+
+(function testReflectConstructReturnSomethingElseSloppy() {
+ var R = {};
+ function Constructor() { return R; }
+ assertSame(R, Reflect.construct(Constructor, []));
+})();
+
+
+(function testReflectConstructNewTargetStrict() {
+ "use strict";
+ function Constructor() { this[9] = 1; }
+ var O = Reflect.construct(Constructor, [], Array);
+ assertEquals(1, O[9]);
+ // Ordinary object with Array.prototype --- no exotic Array magic
+ assertFalse(Array.isArray(O));
+ assertEquals(0, O.length);
+ assertSame(Array.prototype, Object.getPrototypeOf(O));
+})();
+
+
+(function testReflectConstructNewTargetSloppy() {
+ function Constructor() { this[9] = 1; }
+ var O = Reflect.construct(Constructor, [], Array);
+ assertEquals(1, O[9]);
+ // Ordinary object with Array.prototype --- no exotic Array magic
+ assertFalse(Array.isArray(O));
+ assertEquals(0, O.length);
+ assertSame(Array.prototype, Object.getPrototypeOf(O));
+})();
+
+
+(function testReflectConstructNewTargetStrict2() {
+ "use strict";
+ function Constructor() { this[9] = 1; }
+ Constructor.prototype.add = function(x) {
+ this[this.length] = x; return this;
+ }
+ var O = Reflect.construct(Array, [1, 2, 3], Constructor);
+ // Exotic Array object with Constructor.prototype
+ assertTrue(Array.isArray(O));
+ assertSame(Constructor.prototype, Object.getPrototypeOf(O));
+ assertFalse(O instanceof Array);
+ assertEquals(3, O.length);
+ assertEquals(undefined, O[9]);
+ assertSame(O, O.add(4));
+ assertEquals(4, O.length);
+ assertEquals(4, O[3]);
+})();
+
+
+(function testReflectConstructNewTargetSloppy2() {
+ function Constructor() { this[9] = 1; }
+ Constructor.prototype.add = function(x) {
+ this[this.length] = x; return this;
+ }
+ var O = Reflect.construct(Array, [1, 2, 3], Constructor);
+ // Exotic Array object with Constructor.prototype
+ assertTrue(Array.isArray(O));
+ assertSame(Constructor.prototype, Object.getPrototypeOf(O));
+ assertFalse(O instanceof Array);
+ assertEquals(3, O.length);
+ assertEquals(undefined, O[9]);
+ assertSame(O, O.add(4));
+ assertEquals(4, O.length);
+ assertEquals(4, O[3]);
+})();
+
+
+(function testReflectConstructNewTargetStrict3() {
+ "use strict";
+ function A() {}
+ function B() {}
+ var O = Reflect.construct(A, [], B);
+ // TODO(caitp): bug: newTarget prototype is not used if it is not
+ // explicitly set.
+ //assertSame(B.prototype, Object.getPrototypeOf(O));
+})();
+
+
+(function testReflectConstructNewTargetSloppy3() {
+ function A() {}
+ function B() {}
+ var O = Reflect.construct(A, [], B);
+ // TODO(caitp): bug: newTarget prototype is not used if it is not
+ // explicitly set.
+ //assertSame(B.prototype, Object.getPrototypeOf(O));
+})();
+
+
+(function testAppliedArgumentsLength() {
+ function lengthStrict() { 'use strict'; this.a = arguments.length; }
+ function lengthSloppy() { this.a = arguments.length; }
+
+ assertEquals(0, Reflect.construct(lengthStrict, []).a);
+ assertEquals(0, Reflect.construct(lengthSloppy, []).a);
+ assertEquals(0, Reflect.construct(lengthStrict, {}).a);
+ assertEquals(0, Reflect.construct(lengthSloppy, {}).a);
+
+ for (var i = 0; i < 256; ++i) {
+ assertEquals(i, Reflect.construct(lengthStrict, new Array(i)).a);
+ assertEquals(i, Reflect.construct(lengthSloppy, new Array(i)).a);
+ assertEquals(i, Reflect.construct(lengthStrict, { length: i }).a);
+ assertEquals(i, Reflect.construct(lengthSloppy, { length: i }).a);
+ }
+})();
+
+
+(function testAppliedArgumentsLengthThrows() {
+ function noopStrict() { 'use strict'; }
+ function noopSloppy() { }
+ function MyError() {}
+
+ var argsList = {};
+ Object.defineProperty(argsList, "length", {
+ get: function() { throw new MyError(); }
+ });
+
+ assertThrows(function() {
+ Reflect.construct(noopStrict, argsList);
+ }, MyError);
+
+ assertThrows(function() {
+ Reflect.construct(noopSloppy, argsList);
+ }, MyError);
+})();
+
+
+(function testAppliedArgumentsElementThrows() {
+ function noopStrict() { 'use strict'; }
+ function noopSloppy() { }
+ function MyError() {}
+
+ var argsList = { length: 1 };
+ Object.defineProperty(argsList, "0", {
+ get: function() { throw new MyError(); }
+ });
+
+ assertThrows(function() {
+ Reflect.construct(noopStrict, argsList);
+ }, MyError);
+
+ assertThrows(function() {
+ Reflect.construct(noopSloppy, argsList);
+ }, MyError);
+})();
+
+
+(function testAppliedNonFunctionStrict() {
+ 'use strict';
+ assertThrows(function() { Reflect.construct(void 0, []); }, TypeError);
+ assertThrows(function() { Reflect.construct(null, []); }, TypeError);
+ assertThrows(function() { Reflect.construct(123, []); }, TypeError);
+ assertThrows(function() { Reflect.construct("str", []); }, TypeError);
+ assertThrows(function() { Reflect.construct(Symbol("x"), []); }, TypeError);
+ assertThrows(function() { Reflect.construct(/123/, []); }, TypeError);
+ assertThrows(function() { Reflect.construct(NaN, []); }, TypeError);
+ assertThrows(function() { Reflect.construct({}, []); }, TypeError);
+ assertThrows(function() { Reflect.construct([], []); }, TypeError);
+})();
+
+
+(function testAppliedNonFunctionSloppy() {
+ assertThrows(function() { Reflect.construct(void 0, []); }, TypeError);
+ assertThrows(function() { Reflect.construct(null, []); }, TypeError);
+ assertThrows(function() { Reflect.construct(123, []); }, TypeError);
+ assertThrows(function() { Reflect.construct("str", []); }, TypeError);
+ assertThrows(function() { Reflect.construct(Symbol("x"), []); }, TypeError);
+ assertThrows(function() { Reflect.construct(/123/, []); }, TypeError);
+ assertThrows(function() { Reflect.construct(NaN, []); }, TypeError);
+ assertThrows(function() { Reflect.construct({}, []); }, TypeError);
+ assertThrows(function() { Reflect.construct([], []); }, TypeError);
+})();
+
+
+(function testAppliedArgumentsNonList() {
+ function noopStrict() { 'use strict'; }
+ function noopSloppy() {}
+ assertThrows(function() { Reflect.construct(noopStrict, null); }, TypeError);
+ assertThrows(function() { Reflect.construct(noopSloppy, null); }, TypeError);
+ assertThrows(function() { Reflect.construct(noopStrict, 1); }, TypeError);
+ assertThrows(function() { Reflect.construct(noopSloppy, 1); }, TypeError);
+ assertThrows(function() { Reflect.construct(noopStrict, "BAD"); }, TypeError);
+ assertThrows(function() { Reflect.construct(noopSloppy, "BAD"); }, TypeError);
+ assertThrows(function() { Reflect.construct(noopStrict, true); }, TypeError);
+ assertThrows(function() { Reflect.construct(noopSloppy, true); }, TypeError);
+ var sym = Symbol("x");
+ assertThrows(function() { Reflect.construct(noopStrict, sym); }, TypeError);
+ assertThrows(function() { Reflect.construct(noopSloppy, sym); }, TypeError);
+})();
+
+
+(function testAppliedArgumentValue() {
+ function firstStrict(a) { 'use strict'; this.a = a; }
+ function firstSloppy(a) { this.a = a; }
+ function lastStrict(a) {
+ 'use strict'; this.a = arguments[arguments.length - 1]; }
+ function lastSloppy(a) { this.a = arguments[arguments.length - 1]; }
+ function sumStrict() {
+ 'use strict';
+ var sum = arguments[0];
+ for (var i = 1; i < arguments.length; ++i) {
+ sum += arguments[i];
+ }
+ this.a = sum;
+ }
+ function sumSloppy() {
+ var sum = arguments[0];
+ for (var i = 1; i < arguments.length; ++i) {
+ sum += arguments[i];
+ }
+ this.a = sum;
+ }
+
+ assertEquals("OK!", Reflect.construct(firstStrict, ["OK!"]).a);
+ assertEquals("OK!", Reflect.construct(firstSloppy, ["OK!"]).a);
+ assertEquals("OK!", Reflect.construct(firstStrict,
+ { 0: "OK!", length: 1 }).a);
+ assertEquals("OK!", Reflect.construct(firstSloppy,
+ { 0: "OK!", length: 1 }).a);
+ assertEquals("OK!", Reflect.construct(lastStrict,
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, "OK!"]).a);
+ assertEquals("OK!", Reflect.construct(lastSloppy,
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, "OK!"]).a);
+ assertEquals("OK!", Reflect.construct(lastStrict,
+ { 9: "OK!", length: 10 }).a);
+ assertEquals("OK!", Reflect.construct(lastSloppy,
+ { 9: "OK!", length: 10 }).a);
+ assertEquals("TEST", Reflect.construct(sumStrict,
+ ["T", "E", "S", "T"]).a);
+ assertEquals("TEST!!", Reflect.construct(sumStrict,
+ ["T", "E", "S", "T", "!", "!"]).a);
+ assertEquals(10, Reflect.construct(sumStrict,
+ { 0: 1, 1: 2, 2: 3, 3: 4, length: 4 }).a);
+ assertEquals("TEST", Reflect.construct(sumSloppy,
+ ["T", "E", "S", "T"]).a);
+ assertEquals("TEST!!", Reflect.construct(sumSloppy,
+ ["T", "E", "S", "T", "!", "!"]).a);
+ assertEquals(10, Reflect.construct(sumSloppy,
+ { 0: 1, 1: 2, 2: 3, 3: 4, length: 4 }).a);
+})();
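
The newTarget tests above capture the ES6 split between the constructor whose code runs and the prototype the new instance receives. A small sketch of the same behavior the assertions encode, assuming a spec-conformant Reflect.construct (Tagged is an illustrative name):

function Tagged() { this.tag = "t"; }
// Array's constructor logic runs, but the prototype comes from newTarget:
var a = Reflect.construct(Array, [1, 2, 3], Tagged);
Array.isArray(a);                               // true: still an exotic Array
Object.getPrototypeOf(a) === Tagged.prototype;  // true: newTarget's prototype
a instanceof Array;                             // false: Array.prototype is not on the chain
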
diff --git a/deps/v8/test/mjsunit/regress/regress-3501.js b/deps/v8/test/mjsunit/harmony/regress/regress-3501.js
index 4b449e458f..4b449e458f 100644
--- a/deps/v8/test/mjsunit/regress/regress-3501.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-3501.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-448730.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js
index 31d276aa83..31d276aa83 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-448730.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-448730.js
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-451770.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js
index 942814a316..942814a316 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-451770.js
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-451770.js
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js
new file mode 100644
index 0000000000..c30260db72
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-461520.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+var fuse = 1;
+var handler = {
+ get: function() { return function() {} },
+ getPropertyDescriptor: function() {
+ if (fuse-- == 0) throw "please die";
+ return {value: function() {}, configurable: true};
+ }
+};
+
+var p = Proxy.create(handler);
+var o = Object.create(p);
+with (o) { f() }
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-465671-null.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-465671-null.js
new file mode 100644
index 0000000000..d24599c385
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-465671-null.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-arrow-functions
+
+// This used to trigger a segfault because of NULL being accessed.
+function f() {
+ var a = [10];
+ try {
+ f();
+ } catch(e) {
+ a.map((v) => v + 1);
+ }
+}
+f();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-crbug-465671.js b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-465671.js
new file mode 100644
index 0000000000..24f4d05475
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/regress/regress-crbug-465671.js
@@ -0,0 +1,16 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-arrow-functions
+
+// This used to trigger a crash because of an unhandled stack overflow.
+function f() {
+ var a = [10];
+ try {
+ f();
+ } catch(e) {
+ a.map(v => v + 1);
+ }
+}
+f();
diff --git a/deps/v8/test/mjsunit/harmony/rest-params.js b/deps/v8/test/mjsunit/harmony/rest-params.js
index 5bb258ee68..341cb33087 100644
--- a/deps/v8/test/mjsunit/harmony/rest-params.js
+++ b/deps/v8/test/mjsunit/harmony/rest-params.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --harmony-rest-parameters
+// Flags: --harmony-rest-parameters --harmony-classes
(function testRestIndex() {
assertEquals(5, (function(...args) { return args.length; })(1,2,3,4,5));
@@ -180,3 +180,35 @@ var O = {
assertEquals([], ((...args) => args)());
assertEquals([1,2,3], ((...args) => args)(1,2,3));
})();*/
+
+
+(function testRestParamsWithNewTarget() {
+ "use strict";
+ class Base {
+ constructor(...a) {
+ this.base = a;
+ assertEquals(arguments.length, a.length);
+ var args = [];
+ for (var i = 0; i < arguments.length; ++i) {
+ args.push(arguments[i]);
+ }
+ assertEquals(args, a);
+ }
+ }
+ class Child extends Base {
+ constructor(...b) {
+ super(1, 2, 3);
+ this.child = b;
+ assertEquals(arguments.length, b.length);
+ var args = [];
+ for (var i = 0; i < arguments.length; ++i) {
+ args.push(arguments[i]);
+ }
+ assertEquals(args, b);
+ }
+ }
+
+ var c = new Child(1, 2, 3);
+ assertEquals([1, 2, 3], c.child);
+ assertEquals([1, 2, 3], c.base);
+})();
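
The new test's invariant, stripped of the class machinery, is that a rest parameter collects exactly the values the arguments object sees. A compact sketch (collect is an illustrative name):

function collect(...rest) {
  // Arrow functions have no arguments object of their own, so the comparison
  // below reads collect's arguments.
  return rest.length === arguments.length &&
         rest.every((v, i) => v === arguments[i]);
}
collect(1, "x", {});  // true
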
diff --git a/deps/v8/test/mjsunit/harmony/typedarrays.js b/deps/v8/test/mjsunit/harmony/typedarrays.js
index a4d6e7927a..0cdb7ed396 100644
--- a/deps/v8/test/mjsunit/harmony/typedarrays.js
+++ b/deps/v8/test/mjsunit/harmony/typedarrays.js
@@ -530,7 +530,7 @@ function TestTypedArraysWithIllegalIndices() {
* assertEquals(undefined, a[-Infinity]);
*/
a[1.5] = 10;
- assertEquals(undefined, a[1.5]);
+ assertEquals(10, a[1.5]);
var nan = Math.sqrt(-1);
a[nan] = 5;
assertEquals(5, a[nan]);
@@ -579,7 +579,7 @@ function TestTypedArraysWithIllegalIndicesStrict() {
* assertEquals(undefined, a[-Infinity]);
*/
a[1.5] = 10;
- assertEquals(undefined, a[1.5]);
+ assertEquals(10, a[1.5]);
var nan = Math.sqrt(-1);
a[nan] = 5;
assertEquals(5, a[nan]);
diff --git a/deps/v8/test/mjsunit/json2.js b/deps/v8/test/mjsunit/json2.js
index f048f05290..f68c76c92a 100644
--- a/deps/v8/test/mjsunit/json2.js
+++ b/deps/v8/test/mjsunit/json2.js
@@ -183,3 +183,8 @@ try {
externalizeString(str, true);
} catch (e) { }
TestStringify("\"external\"", str, null, 0);
+
+var o = {};
+o.somespecialproperty = 10;
+o["\x19"] = 10;
+assertThrows("JSON.parse('{\"somespecialproperty\":100, \"\x19\":10}')");
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index dc96a1de35..4b3ae5dd7c 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -68,6 +68,7 @@
'compare-known-objects-slow': [PASS, NO_VARIANTS],
'elements-kind': [PASS, NO_VARIANTS],
'opt-elements-kind': [PASS, NO_VARIANTS],
+ 'smi-representation': [PASS, NO_VARIANTS],
# Some tests are just too slow to run for now.
'big-object-literal': [PASS, NO_VARIANTS],
@@ -95,25 +96,11 @@
'debug-evaluate-locals': [PASS, NO_VARIANTS],
'debug-liveedit-check-stack': [PASS, NO_VARIANTS], # only in no-snap mode.
'debug-liveedit-double-call': [PASS, NO_VARIANTS],
- 'debug-step-stub-callfunction': [PASS, NO_VARIANTS],
'debug-set-variable-value': [PASS, NO_VARIANTS],
- 'debug-stepin-accessor': [PASS, NO_VARIANTS],
- 'debug-stepin-builtin': [PASS, NO_VARIANTS],
- 'debug-stepin-constructor': [PASS, NO_VARIANTS],
- 'debug-stepin-function-call': [PASS, NO_VARIANTS],
- 'debug-stepnext-do-while': [PASS, NO_VARIANTS],
'debug-stepout-scope-part1': [PASS, NO_VARIANTS],
'debug-stepout-scope-part2': [PASS, NO_VARIANTS],
'debug-stepout-scope-part3': [PASS, NO_VARIANTS],
- 'es6/debug-stepin-microtasks': [PASS, NO_VARIANTS],
- 'es6/debug-stepnext-for': [PASS, NO_VARIANTS],
- 'harmony/debug-evaluate-blockscopes': [PASS, NO_VARIANTS],
-
- # TODO(jarin): Some tests don't like --turbo-deoptimzation very much.
- 'asm/embenchen/lua_binarytrees': [SKIP],
- 'es6/symbols': [PASS, NO_VARIANTS],
- 'regress/regress-354433': [PASS, NO_VARIANTS], # only on ARM simulator.
- 'regress/regress-crbug-259300': [PASS, NO_VARIANTS],
+ 'es6/debug-evaluate-blockscopes': [PASS, NO_VARIANTS],
##############################################################################
# Too slow in debug mode with --stress-opt mode.
@@ -123,6 +110,11 @@
'regress/regress-create-exception': [PASS, ['mode == debug', SKIP]],
##############################################################################
+ # Too slow in debug mode for validation of elements.
+ 'regress/regress-430201': [PASS, ['mode == debug', SKIP]],
+ 'regress/regress-430201b': [PASS, ['mode == debug', SKIP]],
+
+ ##############################################################################
# Too slow in debug mode for GC stress mode.
'regress/regress-crbug-217858': [PASS, ['mode == debug', SKIP]],
@@ -139,6 +131,7 @@
# Very slow on ARM and MIPS, contains no architecture dependent code.
'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', TIMEOUT]],
+ 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', SKIP]],
##############################################################################
# This test expects to reach a certain recursion depth, which may not work
@@ -185,6 +178,10 @@
# nosse2. Also for arm novfp3.
'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == x87 or arch == arm and simulator == True', PASS]],
+ # BUG(v8:3985). Wrong materialization of arguments object after throwing
+ # an exception.
+ 'regress/regress-3985': [PASS, FAIL],
+
# Skip endian dependent test for mips due to different typed views of the same
# array buffer.
'nans': [PASS, ],
@@ -194,6 +191,9 @@
# Too slow for slow variants.
'asm/embenchen/*': [PASS, SLOW, FAST_VARIANTS],
+
+ # BUG(v8:3838).
+ 'regress/regress-3116': [PASS, ['isolates', FLAKY]],
}], # ALWAYS
##############################################################################
@@ -210,7 +210,7 @@
'fast-prototype': [SKIP],
'field-type-tracking': [SKIP],
'getters-on-elements': [SKIP],
- 'harmony/block-let-crankshaft': [SKIP],
+ 'es6/block-let-crankshaft': [SKIP],
'opt-elements-kind': [SKIP],
'osr-elements-kind': [SKIP],
'regress/regress-crbug-137689': [SKIP],
@@ -244,8 +244,10 @@
# Issue 3723.
'regress/regress-3717': [SKIP],
- # Issue 3776.
- 'debug-stepframe': [SKIP],
+ # Issue 3924.
+ 'mjsunit/debug-clearbreakpointgroup': [SKIP],
+ # Issue 3969.
+ 'mjsunit/debug-references': [SKIP],
}], # 'gc_stress == True'
##############################################################################
@@ -585,7 +587,17 @@
'readonly': [SKIP],
'array-feedback': [SKIP],
+ # Deopting uses just enough memory to make this one OOM.
+ 'regress/regress-3976': [SKIP],
+
# Deopt every n garbage collections collides with deopt every n times.
'regress/regress-2653': [SKIP],
}], # 'deopt_fuzzer == True'
+
+##############################################################################
+['arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True', {
+
+ # Takes too long with the simulator.
+ 'regress/regress-1132': [SKIP],
+}], # 'arch == ppc and simulator_run == True'
]
diff --git a/deps/v8/test/mjsunit/regexp-stack-overflow.js b/deps/v8/test/mjsunit/regexp-stack-overflow.js
new file mode 100644
index 0000000000..63f6971ace
--- /dev/null
+++ b/deps/v8/test/mjsunit/regexp-stack-overflow.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+var re = /\w/;
+re.test("a"); // Trigger regexp compile.
+
+function rec() {
+ try {
+ return rec();
+ } catch (e) {
+ return re.test("b");
+ }
+}
+
+assertTrue(rec());
diff --git a/deps/v8/test/mjsunit/regress/regress-1530.js b/deps/v8/test/mjsunit/regress/regress-1530.js
index 20d1f265c0..fa86f62ce5 100644
--- a/deps/v8/test/mjsunit/regress/regress-1530.js
+++ b/deps/v8/test/mjsunit/regress/regress-1530.js
@@ -80,8 +80,10 @@ assertFalse(Object.getOwnPropertyDescriptor(f, 'prototype').writable);
assertThrows("'use strict'; f.prototype = {}");
assertThrows("Object.defineProperty(f, 'prototype', { value: {} })");
-// Verify that non-writability of other properties is respected.
-assertThrows("Object.defineProperty(f, 'name', { value: {} })");
-assertThrows("Object.defineProperty(f, 'length', { value: {} })");
+// Verify that non-configurability of other properties is respected, but
+// non-writability is ignored by Object.defineProperty().
+// name and length are configurable in ES6
+Object.defineProperty(f, 'name', { value: {} });
+Object.defineProperty(f, 'length', { value: {} });
assertThrows("Object.defineProperty(f, 'caller', { value: {} })");
assertThrows("Object.defineProperty(f, 'arguments', { value: {} })");
diff --git a/deps/v8/test/mjsunit/regress/regress-270142.js b/deps/v8/test/mjsunit/regress/regress-270142.js
index 6e0865c4f8..63f4d1414e 100644
--- a/deps/v8/test/mjsunit/regress/regress-270142.js
+++ b/deps/v8/test/mjsunit/regress/regress-270142.js
@@ -39,7 +39,7 @@ function g(x) {
function checkNameDescriptor(f) {
var descriptor = Object.getOwnPropertyDescriptor(f, "name");
- assertFalse(descriptor.configurable);
+ assertTrue(descriptor.configurable);
assertFalse(descriptor.enumerable);
assertFalse(descriptor.writable);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-330046.js b/deps/v8/test/mjsunit/regress/regress-330046.js
index d94b804ac0..eb0d3f38a2 100644
--- a/deps/v8/test/mjsunit/regress/regress-330046.js
+++ b/deps/v8/test/mjsunit/regress/regress-330046.js
@@ -58,4 +58,4 @@ f(10, o3);
// The old code is already deoptimized, but f still points to it.
// Disassembling it will crash.
-%DebugDisassembleFunction(f);
+%DisassembleFunction(f);
diff --git a/deps/v8/test/mjsunit/regress/regress-3960.js b/deps/v8/test/mjsunit/regress/regress-3960.js
new file mode 100644
index 0000000000..4aaab0b067
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3960.js
@@ -0,0 +1,36 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test that setting break points works correctly when the debugger is
+// activated late, which leads to duplicate shared function infos.
+
+(function() {
+ var Debug = %GetDebugContext().Debug;
+
+ function listener(event, exec_state, event_data, data) {
+ if (event != Debug.DebugEvent.Break) return;
+ try {
+ assertTrue(/foo/.test(exec_state.frame(0).sourceLineText()));
+ break_count++;
+ } catch (e) {
+ exception = e;
+ }
+ }
+
+ for (var i = 0; i < 3; i++) {
+ var foo = function() { a = 1; }
+ var exception = null;
+ var break_count = 0;
+ Debug.setListener(listener);
+ if (i < 2) Debug.setBreakPoint(foo, 0, 0);
+ assertTrue(/\[B\d\]a = 1/.test(Debug.showBreakPoints(foo)));
+ foo();
+ assertEquals(1, break_count);
+ assertNull(exception);
+ }
+
+ Debug.setListener(null);
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-3969.js b/deps/v8/test/mjsunit/regress/regress-3969.js
new file mode 100644
index 0000000000..4659e1caf8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3969.js
@@ -0,0 +1,36 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function Inner() {
+ this.property = "OK";
+ this.o2 = 1;
+}
+
+function Outer(inner) {
+ this.inner = inner;
+}
+
+var inner = new Inner();
+var outer = new Outer(inner);
+
+Outer.prototype.boom = function() {
+ return this.inner.property;
+}
+
+assertEquals("OK", outer.boom());
+assertEquals("OK", outer.boom());
+%OptimizeFunctionOnNextCall(Outer.prototype.boom);
+assertEquals("OK", outer.boom());
+
+inner = undefined;
+%SetAllocationTimeout(0 /*interval*/, 2 /*timeout*/);
+// Call something that will do GC while holding a handle to outer's map.
+// The key is that this lets inner's map die while keeping outer's map alive.
+delete outer.inner;
+
+outer = new Outer({field: 1.51, property: "OK"});
+
+assertEquals("OK", outer.boom());
diff --git a/deps/v8/test/mjsunit/regress/regress-3976.js b/deps/v8/test/mjsunit/regress/regress-3976.js
new file mode 100644
index 0000000000..c151f689f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3976.js
@@ -0,0 +1,80 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --max-old-space-size=60
+
+table = [];
+
+for (var i = 0; i < 32; i++) {
+ table[i] = String.fromCharCode(i + 0x410);
+}
+
+
+var random = (function() {
+ var seed = 10;
+ return function() {
+ seed = (seed * 1009) % 8831;
+ return seed;
+ };
+})();
+
+
+function key(length) {
+ var s = "";
+ for (var i = 0; i < length; i++) {
+ s += table[random() % 32];
+ }
+ return '"' + s + '"';
+}
+
+
+function value() {
+ return '[{' + '"field1" : ' + random() + ', "field2" : ' + random() + '}]';
+}
+
+
+function generate(n) {
+ var s = '{';
+ for (var i = 0; i < n; i++) {
+ if (i > 0) s += ', ';
+ s += key(random() % 10 + 7);
+ s += ':';
+ s += value();
+ }
+ s += '}';
+ return s;
+}
+
+
+print("generating");
+
+var str = generate(50000);
+
+print("parsing " + str.length);
+JSON.parse(str);
+
+print("done");
diff --git a/deps/v8/test/mjsunit/regress/regress-3985.js b/deps/v8/test/mjsunit/regress/regress-3985.js
new file mode 100644
index 0000000000..6dbc4bdadd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-3985.js
@@ -0,0 +1,45 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var shouldThrow = false;
+
+function h() {
+ try { // Prevent inlining in Crankshaft.
+ } catch(e) { }
+ var res = g.arguments[0].x;
+ if (shouldThrow) {
+ throw res;
+ }
+ return res;
+}
+
+function g(o) { h(); }
+
+function f1() {
+ var o = { x : 1 };
+ g(o);
+ return o.x;
+}
+
+function f2() {
+ var o = { x : 2 };
+ g(o);
+ return o.x;
+}
+
+f1();
+f2();
+f1();
+f2();
+%OptimizeFunctionOnNextCall(f1);
+%OptimizeFunctionOnNextCall(f2);
+shouldThrow = true;
+try { f1(); } catch(e) {
+ assertEquals(e, 1);
+}
+try { f2(); } catch(e) {
+ assertEquals(e, 2);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-4023.js b/deps/v8/test/mjsunit/regress/regress-4023.js
new file mode 100644
index 0000000000..902741f6f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4023.js
@@ -0,0 +1,67 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc --block-concurrent-recompilation
+
+function Inner() {
+ this.property = "OK";
+ this.prop2 = 1;
+}
+
+function Outer() {
+ this.o = "u";
+}
+function KeepMapAlive(o) {
+ return o.o;
+}
+function SetInner(o, i) {
+ o.inner_field = i;
+}
+function Crash(o) {
+ return o.inner_field.property;
+}
+
+var inner = new Inner();
+var outer = new Outer();
+
+// Collect type feedback.
+SetInner(new Outer(), inner);
+SetInner(outer, inner);
+
+// This function's only purpose is to stash away a Handle that keeps
+// outer's map alive during the gc() call below. We store this handle
+// on the compiler thread :-)
+KeepMapAlive(outer);
+KeepMapAlive(outer);
+%OptimizeFunctionOnNextCall(KeepMapAlive, "concurrent");
+KeepMapAlive(outer);
+
+// So far, all is well. Collect type feedback and optimize.
+print(Crash(outer));
+print(Crash(outer));
+%OptimizeFunctionOnNextCall(Crash);
+print(Crash(outer));
+
+// Null out references and perform GC. This will keep outer's map alive
+// (due to the handle created above), but will let inner's map die. Hence,
+// inner_field's field type stored in outer's map will get cleared.
+inner = undefined;
+outer = undefined;
+gc();
+
+// We could unblock the compiler thread now. But why bother?
+
+// Now optimize SetInner while inner_field's type is still cleared!
+// This will generate optimized code that stores arbitrary objects
+// into inner_field without checking their type against the field type.
+%OptimizeFunctionOnNextCall(SetInner);
+
+// Use the optimized code to store an arbitrary object into
+// o2's inner_field, without triggering any dependent code deopts...
+var o2 = new Outer();
+SetInner(o2, { invalid: 1.51, property: "OK" });
+// ...and then use the existing code expecting an Inner-class object to
+// read invalid data (in this case, a raw double).
+// We crash trying to convert the raw double into a printable string.
+print(Crash(o2));
diff --git a/deps/v8/test/mjsunit/regress/regress-4027.js b/deps/v8/test/mjsunit/regress/regress-4027.js
new file mode 100644
index 0000000000..3a5d11b8e5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-4027.js
@@ -0,0 +1,60 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+function Inner() {
+ this.inner_name = "inner";
+}
+
+function Boom() {
+ this.boom = "boom";
+}
+
+function Outer() {
+ this.outer_name = "outer";
+}
+
+function SetInner(inner, value) {
+ inner.prop = value;
+}
+
+function SetOuter(outer, value) {
+ outer.inner = value;
+}
+
+var inner1 = new Inner();
+var inner2 = new Inner();
+
+SetInner(inner1, 10);
+SetInner(inner2, 10);
+
+var outer1 = new Outer();
+var outer2 = new Outer();
+var outer3 = new Outer();
+
+SetOuter(outer1, inner1);
+SetOuter(outer1, inner1);
+SetOuter(outer1, inner1);
+
+SetOuter(outer2, inner2);
+SetOuter(outer2, inner2);
+SetOuter(outer2, inner2);
+
+SetOuter(outer3, inner2);
+SetOuter(outer3, inner2);
+SetOuter(outer3, inner2);
+
+
+SetInner(inner2, 6.5);
+
+outer1 = null;
+inner1 = null;
+
+gc();
+
+var boom = new Boom();
+SetOuter(outer2, boom);
+
+gc();
diff --git a/deps/v8/test/preparser/strict-const.js b/deps/v8/test/mjsunit/regress/regress-430201b.js
index 97b908128e..056504d1d7 100644
--- a/deps/v8/test/preparser/strict-const.js
+++ b/deps/v8/test/mjsunit/regress/regress-430201b.js
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -24,8 +24,20 @@
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Flags: --noharmony-scoping
-"use strict";
-const x = 42;
+// Flags: --allow-natives-syntax --expose-gc
+
+(function() {
+ var array_1 = [];
+
+ %SetFlags("--stress-compaction");
+ for (var a = 0; a < 10000; a++) { array_1[a * 100] = 0; }
+
+ gc();
+ gc();
+
+ var array_2 = [];
+ for (var i = 0; i < 321361; i++) {
+ array_2[i] = String.fromCharCode(i)[0];
+ }
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-460937.js b/deps/v8/test/mjsunit/regress/regress-460937.js
new file mode 100644
index 0000000000..cd57f93328
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-460937.js
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ var a = new Array(100000);
+ var i = 0;
+ while (!%HasFastDoubleElements(a)) {
+ a[i] = i;
+ i += 0.1;
+ }
+ a[1] = 1.5;
+}
+
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-463028.js b/deps/v8/test/mjsunit/regress/regress-463028.js
new file mode 100644
index 0000000000..1454ef1aea
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-463028.js
@@ -0,0 +1,18 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var o = {}
+Object.defineProperty(o, "z", {
+ set: function() {
+ %DeoptimizeFunction(f);
+ },
+});
+
+function f(o) {
+ return 19 + (void(o.z = 12));
+}
+
+f(o);
diff --git a/deps/v8/test/mjsunit/regress/regress-469605.js b/deps/v8/test/mjsunit/regress/regress-469605.js
new file mode 100644
index 0000000000..65725117bd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-469605.js
@@ -0,0 +1,43 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function counter() {
+ var i = 100;
+ return function() {
+ if (i-- > 0) return i;
+ throw "done";
+ }
+}
+
+var c1 = counter();
+var c2 = counter();
+
+var f = (function() {
+ "use asm";
+ return function f(i) {
+ i = i|0;
+ do {
+ if (i > 0) c1();
+ else c2();
+ } while (true);
+ }
+})();
+
+assertThrows(function() { f(0); });
+assertThrows(function() { f(1); });
+
+var c3 = counter();
+
+var g = (function() {
+ "use asm";
+ return function g(i) {
+ i = i + 1;
+ do {
+ i = c3(i);
+ } while (true);
+ }
+})();
+
+assertThrows(function() { g(0); });
+assertThrows(function() { g(1); });
diff --git a/deps/v8/test/mjsunit/regress/regress-470804.js b/deps/v8/test/mjsunit/regress/regress-470804.js
new file mode 100644
index 0000000000..cebb91f7e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-470804.js
@@ -0,0 +1,53 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-gc
+
+function f() {
+ this.foo00 = 0;
+ this.foo01 = 0;
+ this.foo02 = 0;
+ this.foo03 = 0;
+ this.foo04 = 0;
+ this.foo05 = 0;
+ this.foo06 = 0;
+ this.foo07 = 0;
+ this.foo08 = 0;
+ this.foo09 = 0;
+ this.foo0a = 0;
+ this.foo0b = 0;
+ this.foo0c = 0;
+ this.foo0d = 0;
+ this.foo0e = 0;
+ this.foo0f = 0;
+ this.foo10 = 0;
+ this.foo11 = 0;
+ this.foo12 = 0;
+ this.foo13 = 0;
+ this.foo14 = 0;
+ this.foo15 = 0;
+ this.foo16 = 0;
+ this.foo17 = 0;
+ this.foo18 = 0;
+ this.foo19 = 0;
+ this.foo1a = 0;
+ this.foo1b = 0;
+ this.foo1c = 0;
+ this.foo1d = 0;
+ this.foo1e = 0;
+ this.foo1f = 0;
+ this.d = 1.3;
+ gc();
+ this.boom = 230;
+ this.boom = 1.4;
+}
+
+function g() {
+ return new f();
+}
+g();
+g();
+var o = g();
+assertEquals(0, o.foo00);
+assertEquals(1.4, o.boom);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-385002.js b/deps/v8/test/mjsunit/regress/regress-crbug-385002.js
index 34713e27d4..e9023e1d6d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-385002.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-385002.js
@@ -4,7 +4,7 @@
// Flags: --stack-size=200 --allow-natives-syntax
-%Break(); // Schedule an interrupt that does not go away.
+%ScheduleBreak(); // Schedule an interrupt that does not go away.
function f() { f(); }
assertThrows(f, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-401915.js b/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
index 96dce04868..67ea19158e 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-401915.js
@@ -10,7 +10,7 @@ Debug.setBreakOnException();
try {
try {
- %DebugPushPromise(new Promise(function() {}));
+ %DebugPushPromise(new Promise(function() {}), function() {});
} catch (e) {
}
throw new Error();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-465564.js b/deps/v8/test/mjsunit/regress/regress-crbug-465564.js
new file mode 100644
index 0000000000..ea0c8dcf66
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-465564.js
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --cache=code
+
+assertEquals(-1, %StringCompare("a", "b"));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-467047.js b/deps/v8/test/mjsunit/regress/regress-crbug-467047.js
new file mode 100644
index 0000000000..373e984a2c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-467047.js
@@ -0,0 +1,17 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --stack-size=100
+
+function captureMatch(re) {
+ var local_variable = 0;
+ "abcd".replace(re, function() { });
+ assertEquals("abcd", RegExp.input);
+ assertEquals("a", RegExp.leftContext);
+ assertEquals("bc", RegExp.lastMatch);
+ assertEquals("d", RegExp.rightContext);
+ assertEquals("foo", captureMatch(/^bar/));
+}
+
+assertThrows(function() { captureMatch(/(bc)/) }, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-467531.js b/deps/v8/test/mjsunit/regress/regress-crbug-467531.js
new file mode 100644
index 0000000000..73256c7acc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-467531.js
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-filter=* --always-opt
+
+assertThrows(function() {
+ "use strict";
+ try {
+ x = ref_error;
+ let x = 0;
+ } catch (e) {
+ throw e;
+ }
+}, ReferenceError);
+
+assertThrows(function() {
+ "use strict";
+ try {
+ x = ref_error;
+ let x = 0;
+ } finally {
+ // re-throw
+ }
+}, ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-filter-contexts.js b/deps/v8/test/mjsunit/regress/regress-filter-contexts.js
new file mode 100644
index 0000000000..d2abe00325
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-filter-contexts.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() { return f.x; }
+f.__proto__ = null;
+f.prototype = "";
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/deps/v8/test/mjsunit/regress/regress-function-length-strict.js b/deps/v8/test/mjsunit/regress/regress-function-length-strict.js
index 700f34a67a..77cca24054 100644
--- a/deps/v8/test/mjsunit/regress/regress-function-length-strict.js
+++ b/deps/v8/test/mjsunit/regress/regress-function-length-strict.js
@@ -37,5 +37,5 @@ var desc = Object.getOwnPropertyDescriptor(foo, 'length');
assertEquals(3, desc.value);
assertFalse(desc.writable);
assertFalse(desc.enumerable);
-assertFalse(desc.configurable);
+assertTrue(desc.configurable);
assertThrows(function() { foo.length = 2; }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/string-compare-memcmp.js b/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
index 45f47343ee..ae4b33ace9 100644
--- a/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
+++ b/deps/v8/test/mjsunit/regress/string-compare-memcmp.js
@@ -4,4 +4,4 @@
// Flags: --allow-natives-syntax
-assertEquals(-1, %StringCompare("abc\u0102", "abc\u0201"));
+assertEquals(-1, %StringCompareRT("abc\u0102", "abc\u0201"));
diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js
index f80a627b24..b256033b53 100644
--- a/deps/v8/test/mjsunit/stack-traces.js
+++ b/deps/v8/test/mjsunit/stack-traces.js
@@ -94,6 +94,37 @@ function testAnonymousMethod() {
(function () { FAIL }).call([1, 2, 3]);
}
+function testFunctionName() {
+ function gen(name, counter) {
+ var f = function foo() {
+ if (counter === 0) {
+ FAIL;
+ }
+ gen(name, counter - 1)();
+ }
+ if (counter === 4) {
+ Object.defineProperty(f, 'name', {get: function(){ throw 239; }});
+ } else if (counter == 3) {
+ Object.defineProperty(f, 'name', {value: 'boo' + '_' + counter});
+ } else {
+ Object.defineProperty(f, 'name', {writable: true});
+ if (counter === 2)
+ f.name = 42;
+ else
+ f.name = name + '_' + counter;
+ }
+ return f;
+ }
+ gen('foo', 4)();
+}
+
+function testFunctionInferredName() {
+ var f = function() {
+ FAIL;
+ }
+ f();
+}
+
function CustomError(message, stripPoint) {
this.message = message;
Error.captureStackTrace(this, stripPoint);
@@ -261,6 +292,9 @@ testTrace("testValue", testValue, ["at Number.causeError"]);
testTrace("testConstructor", testConstructor, ["new Plonk"]);
testTrace("testRenamedMethod", testRenamedMethod, ["Wookie.a$b$c$d [as d]"]);
testTrace("testAnonymousMethod", testAnonymousMethod, ["Array.<anonymous>"]);
+testTrace("testFunctionName", testFunctionName,
+ [" at foo_0 ", " at foo_1", " at foo ", " at boo_3 ", " at foo "]);
+testTrace("testFunctionInferredName", testFunctionInferredName, [" at f "]);
testTrace("testDefaultCustomError", testDefaultCustomError,
["hep-hey", "new CustomError"],
["collectStackTrace"]);
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index d0839ba0fb..c97429f7b7 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --turbo-deoptimization --noharmony-scoping
-// Flags: --noharmony-classes --noharmony-object-literals
+// Flags: --turbo-deoptimization
function CheckStrictMode(code, exception) {
assertDoesNotThrow(code);
@@ -287,19 +286,6 @@ CheckStrictMode("function strict() { print(--arguments); }", SyntaxError);
CheckStrictMode("function strict() { var x = --eval; }", SyntaxError);
CheckStrictMode("function strict() { var x = --arguments; }", SyntaxError);
-// Use of const in strict mode is disallowed in anticipation of ES Harmony.
-CheckStrictMode("const x = 0;", SyntaxError);
-CheckStrictMode("for (const x = 0; false;) {}", SyntaxError);
-CheckStrictMode("function strict() { const x = 0; }", SyntaxError);
-
-// Strict mode only allows functions in StatementList
-CheckStrictMode("if (true) { function invalid() {} }", SyntaxError);
-CheckStrictMode("for (;false;) { function invalid() {} }", SyntaxError);
-CheckStrictMode("{ function invalid() {} }", SyntaxError);
-CheckStrictMode("try { function invalid() {} } catch(e) {}", SyntaxError);
-CheckStrictMode("try { } catch(e) { function invalid() {} }", SyntaxError);
-CheckStrictMode("function outer() {{ function invalid() {} }}", SyntaxError);
-
// Delete of an unqualified identifier
CheckStrictMode("delete unqualified;", SyntaxError);
CheckStrictMode("function strict() { delete unqualified; }", SyntaxError);
diff --git a/deps/v8/test/mjsunit/string-concat.js b/deps/v8/test/mjsunit/string-concat.js
new file mode 100644
index 0000000000..c669b3bd4b
--- /dev/null
+++ b/deps/v8/test/mjsunit/string-concat.js
@@ -0,0 +1,14 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Stringified(toString) {
+ var valueOf = "-" + toString + "-";
+ return {
+ toString: function() { return toString; },
+ valueOf: function() { return valueOf; }
+ };
+}
+
+assertEquals("a.b.", "a.".concat(Stringified("b.")));
+assertEquals("a.b.c.", "a.".concat(Stringified("b."), Stringified("c.")));
diff --git a/deps/v8/test/mjsunit/string-index.js b/deps/v8/test/mjsunit/string-index.js
index 315708ca5f..1c0e3d915d 100644
--- a/deps/v8/test/mjsunit/string-index.js
+++ b/deps/v8/test/mjsunit/string-index.js
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
/**
* @fileoverview Test indexing on strings with [].
*/
@@ -250,6 +252,20 @@ for (var i = 0; i < 100; ++i) {
assertEquals(expected, actual);
}
+// Test out of range with a heap number case.
+var num = Math.floor(4) * 0.5;
+// TODO(mvstanton): figure out a reliable way to get a heap number every time.
+// assertFalse(!%_IsSmi(num));
+var keys = [0, num];
+var str = 'ab', arr = ['a', undefined];
+for (var i = 0; i < 100; ++i) {
+ var index = Math.floor(i / 50);
+ var key = keys[index];
+ var expected = arr[index];
+ var actual = str[key];
+ assertEquals(expected, actual);
+}
+
// Test two byte string.
var str = '\u0427', arr = ['\u0427'];
for (var i = 0; i < 50; ++i) {
diff --git a/deps/v8/test/mjsunit/strong/arrays.js b/deps/v8/test/mjsunit/strong/arrays.js
new file mode 100644
index 0000000000..b9e4fad357
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/arrays.js
@@ -0,0 +1,12 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode
+
+(function NoEllisions() {
+ assertThrows("'use strong'; [,]", SyntaxError);
+ assertThrows("'use strong'; [,3]", SyntaxError);
+ assertThrows("'use strong'; [3,,4]", SyntaxError);
+ assertTrue(eval("'use strong'; [3,] !== [3,4,]"));
+})();
diff --git a/deps/v8/test/mjsunit/strong/classes.js b/deps/v8/test/mjsunit/strong/classes.js
index 3c7caf5f84..e33742af3f 100644
--- a/deps/v8/test/mjsunit/strong/classes.js
+++ b/deps/v8/test/mjsunit/strong/classes.js
@@ -3,15 +3,58 @@
// found in the LICENSE file.
// Flags: --strong-mode
+// Flags: --harmony-classes --harmony-arrow-functions
'use strong';
class C {}
+function assertTypeError(script) { assertThrows(script, TypeError) }
+function assertSyntaxError(script) { assertThrows(script, SyntaxError) }
+function assertReferenceError(script) { assertThrows(script, ReferenceError) }
+
(function ImmutableClassBindings() {
class D {}
- assertThrows(function(){ eval("C = 0") }, TypeError);
- assertThrows(function(){ eval("D = 0") }, TypeError);
+ assertTypeError(function(){ eval("C = 0") });
+ assertTypeError(function(){ eval("D = 0") });
assertEquals('function', typeof C);
assertEquals('function', typeof D);
})();
+
+function constructor(body) {
+ return "'use strong'; " +
+ "(class extends Object { constructor() { " + body + " } })";
+}
+
+(function NoMissingSuper() {
+ assertReferenceError(constructor(""));
+ assertReferenceError(constructor("1"));
+})();
+
+(function NoNestedSuper() {
+ assertSyntaxError(constructor("(super());"));
+ assertSyntaxError(constructor("(() => super())();"));
+ assertSyntaxError(constructor("{ super(); }"));
+ assertSyntaxError(constructor("if (1) super();"));
+})();
+
+(function NoDuplicateSuper() {
+ assertSyntaxError(constructor("super(), super();"));
+ assertSyntaxError(constructor("super(); super();"));
+ assertSyntaxError(constructor("super(); (super());"));
+ assertSyntaxError(constructor("super(); { super() }"));
+ assertSyntaxError(constructor("super(); (() => super())();"));
+})();
+
+(function NoReturnValue() {
+ assertSyntaxError(constructor("return {};"));
+ assertSyntaxError(constructor("return undefined;"));
+ assertSyntaxError(constructor("{ return {}; }"));
+ assertSyntaxError(constructor("if (1) return {};"));
+})();
+
+(function NoReturnBeforeSuper() {
+ assertSyntaxError(constructor("return; super();"));
+ assertSyntaxError(constructor("if (0) return; super();"));
+ assertSyntaxError(constructor("{ return; } super();"));
+})();
diff --git a/deps/v8/test/mjsunit/strong/declaration-after-use.js b/deps/v8/test/mjsunit/strong/declaration-after-use.js
new file mode 100644
index 0000000000..aa5ff67283
--- /dev/null
+++ b/deps/v8/test/mjsunit/strong/declaration-after-use.js
@@ -0,0 +1,258 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --strong-mode --harmony_rest_parameters --harmony_arrow_functions --harmony_classes --harmony_computed_property_names
+
+// Note that it's essential for these tests that the reference is inside dead
+// code (because we already produce ReferenceErrors for run-time unresolved
+// variables and don't want to confuse those with strong mode errors). But the
+// errors should *not* be inside lazy, unexecuted functions, since lazy parsing
+// doesn't produce strong mode scoping errors.
+
+// In addition, assertThrows will call eval and that changes variable binding
+// types (see e.g., UNBOUND_EVAL_SHADOWED). We can avoid unwanted side effects
+// by wrapping the code to be tested inside an outer function.
+function assertThrowsHelper(code) {
+ "use strict";
+ let prologue = "(function outer() { if (false) { ";
+ let epilogue = " } })();";
+
+ assertThrows("'use strong'; " + prologue + code + epilogue, ReferenceError);
+
+ // Make sure the error happens only in strong mode (note that we need strict
+ // mode here because of let).
+ assertDoesNotThrow("'use strict'; " + prologue + code + epilogue);
+}
+
+(function DeclarationAfterUse() {
+ // Note that these tests only test cases where the declaration is found but is
+ // after the use. In particular, we cannot yet detect cases where the use can
+ // possibly bind to a global variable.
+ assertThrowsHelper("x; let x = 0;");
+ assertThrowsHelper("function f() { x; let x = 0; }");
+ assertThrowsHelper("function f() { x; } let x = 0;");
+
+ // These tests need to be done a bit more manually, since var is not allowed
+ // in strong mode:
+ assertThrows(
+ `(function outer() {
+ function f() { 'use strong'; if (false) { x; } } var x = 0; f();
+ })()`,
+ ReferenceError);
+ assertDoesNotThrow(
+ "(function outer() {\n" +
+ " function f() { if (false) { x; } } var x = 0; f(); \n" +
+ "})()");
+
+ assertThrows(
+ "(function outer() {\n" +
+ " function f() { 'use strong'; if (false) { x; } } var x; f(); \n" +
+ "})()",
+ ReferenceError);
+ assertDoesNotThrow(
+ "(function outer() {\n" +
+ " function f() { if (false) { x; } } var x; f(); \n" +
+ "})()");
+
+ // Errors are also detected when the declaration and the use are in the same
+ // eval scope.
+ assertThrows("'use strong'; eval('if (false) { x; let x = 0;}')",
+ ReferenceError);
+ assertDoesNotThrow("'use strict'; eval('if (false) { x; let x = 0; }')");
+
+ // Use occurring in the initializer of the declaration:
+ assertThrowsHelper("let x = x + 1;");
+ assertThrowsHelper("let x = x;");
+ assertThrowsHelper("let x = y, y = 4;");
+ assertThrowsHelper("let x = function() { x; }");
+ assertThrowsHelper("let x = a => { x; }");
+ assertThrowsHelper("function f(x) { return x; }; let x = f(x);");
+ assertThrowsHelper("const x = x;");
+ assertThrowsHelper("const x = function() { x; }");
+ assertThrowsHelper("const x = a => { x; }");
+ assertThrowsHelper("function f(x) {return x}; const x = f(x);");
+
+ assertThrowsHelper("for (let x = x; ; ) { }");
+ assertThrowsHelper("for (const x = x; ; ) { }");
+ assertThrowsHelper("for (let x = y, y; ; ) { }");
+ assertThrowsHelper("for (const x = y, y = 0; ; ) { }");
+
+ // Computed property names
+ assertThrowsHelper("let o = { 'a': 'b', [o.a]: 'c'};");
+})();
+
+
+(function DeclarationAfterUseInClasses() {
+ assertThrowsHelper("class C extends C { }");
+ assertThrowsHelper("let C = class C2 extends C { }");
+ assertThrowsHelper("let C = class C2 extends C2 { }");
+
+ assertThrowsHelper("let C = class C2 { constructor() { C; } }");
+ assertThrowsHelper("let C = class C2 { method() { C; } }");
+ assertThrowsHelper("let C = class C2 { *generator_method() { C; } }");
+
+ assertThrowsHelper(
+ `let C = class C2 {
+ static a() { return 'A'; }
+ [C.a()]() { return 'B'; }
+ };`);
+
+ assertThrowsHelper(
+ `let C = class C2 {
+ static a() { return 'A'; }
+ [C2.a()]() { return 'B'; }
+ };`);
+
+ assertThrowsHelper(
+ `let C = class C2 {
+ [(function() { C; return 'A';})()]() { return 'B'; }
+ };`);
+
+ // The reference to C or C2 is inside a function, but not a method.
+ assertThrowsHelper(
+ `let C = class C2 {
+ [(function() { C2; return 'A';})()]() { return 'B'; }
+ };`);
+
+ assertThrowsHelper(
+ `let C = class C2 {
+ [(function() { C; return 'A';})()]() { return 'B'; }
+ };`);
+
+ // The reference to C or C2 is inside a method, but it's not a method of the
+ // relevant class (C2).
+ assertThrowsHelper(
+ `let C = class C2 {
+ [(new (class D { m() { C2; return 'A'; } })).m()]() {
+ return 'B';
+ }
+ }`);
+
+ assertThrowsHelper(
+ `let C = class C2 {
+ [(new (class D { m() { C; return 'A'; } })).m()]() {
+ return 'B';
+ }
+ }`);
+
+ assertThrowsHelper(
+ `let C = class C2 {
+ [({m() { C2; return 'A'; }}).m()]() { return 'B'; }
+ }`);
+
+ assertThrowsHelper(
+ `let C = class C2 {
+ [({m() { C; return 'A'; }}).m()]() { return 'B'; }
+ }`);
+
+ assertThrowsHelper(
+ `class COuter {
+ m() {
+ class CInner {
+ [({ m() { CInner; return 'A'; } }).m()]() {
+ return 'B';
+ }
+ }
+ }
+ }`);
+})();
+
+
+(function UsesWhichAreFine() {
+ "use strong";
+
+ let var1 = 0;
+ var1;
+
+ let var2a = 0, var2b = var2a + 1, var2c = 2 + var2b;
+
+ for (let var3 = 0; var3 < 1; var3++) {
+ var3;
+ }
+
+ for (let var4a = 0, var4b = var4a; var4a + var4b < 4; var4a++, var4b++) {
+ var4a;
+ var4b;
+ }
+
+ let var5 = 5;
+ for (; var5 < 10; ++var5) { }
+
+ let arr = [1, 2];
+ for (let i of arr) {
+ i;
+ }
+
+ let var6 = [1, 2];
+ // The second var6 resolves to outside (not to the first var6).
+ for (let var6 of var6) { var6; }
+
+ try {
+ throw "error";
+ } catch (e) {
+ e;
+ }
+
+ function func1() { func1; this; }
+ func1();
+ func1;
+
+ function * func2() { func2; this; }
+ func2();
+ func2;
+
+ function func4(p, ...rest) { p; rest; this; func2; }
+ func4();
+
+ let func5 = (p1, p2) => { p1; p2; };
+ func5();
+
+ let func5b = p1 => p1;
+ func5b();
+
+ function func6() {
+ var1, var2a, var2b, var2c;
+ }
+
+ (function eval1() {
+ let var7 = 0; // Declaration position will be something large.
+ // But the use position will be something small; however, this is not an error,
+ // since the use is inside an eval scope.
+ eval("var7;");
+ })();
+
+
+ class C1 { constructor() { C1; } }; new C1();
+ let C2 = class C3 { constructor() { C3; } }; new C2();
+
+ class C4 { method() { C4; } *generator_method() { C4; } }; new C4();
+ let C5 = class C6 { method() { C6; } *generator_method() { C6; } }; new C5();
+
+ class C7 { static method() { C7; } }; new C7();
+ let C8 = class C9 { static method() { C9; } }; new C8();
+
+ class C10 { get x() { C10; } }; new C10();
+ let C11 = class C12 { get x() { C12; } }; new C11();
+
+ // Regression test for unnamed classes.
+ let C13 = class { m() { var1; } };
+
+ class COuter {
+ m() {
+ class CInner {
+ // Here we can refer to COuter but not to CInner (see corresponding
+ // assertion test):
+ [({ m() { COuter; return 'A'; } }).m()]() { return 'B'; }
+ // And here we can refer to both:
+ n() { COuter; CInner; }
+ }
+ return new CInner();
+ }
+ }
+ (new COuter()).m().n();
+
+ // Making sure the check which is supposed to prevent "object literal inside
+ // computed property name references the class name" is not too generic:
+ class C14 { m() { let obj = { n() { C14 } }; obj.n(); } }; (new C14()).m();
+})();
diff --git a/deps/v8/test/mjsunit/strong/functions.js b/deps/v8/test/mjsunit/strong/functions.js
index 4869ac6dfa..6956462e5d 100644
--- a/deps/v8/test/mjsunit/strong/functions.js
+++ b/deps/v8/test/mjsunit/strong/functions.js
@@ -6,28 +6,82 @@
'use strong';
+function f() {}
+function* g() {}
+
(function NoArguments() {
assertThrows("'use strong'; arguments", SyntaxError);
assertThrows("'use strong'; function f() { arguments }", SyntaxError);
+ assertThrows("'use strong'; function* g() { arguments }", SyntaxError);
assertThrows("'use strong'; let f = function() { arguments }", SyntaxError);
+ assertThrows("'use strong'; let g = function*() { arguments }", SyntaxError);
assertThrows("'use strong'; let f = () => arguments", SyntaxError);
// The following are strict mode errors already.
assertThrows("'use strong'; let arguments", SyntaxError);
assertThrows("'use strong'; function f(arguments) {}", SyntaxError);
+ assertThrows("'use strong'; function* g(arguments) {}", SyntaxError);
assertThrows("'use strong'; let f = (arguments) => {}", SyntaxError);
})();
-function g() {}
+(function NoArgumentsProperty() {
+ assertFalse(f.hasOwnProperty("arguments"));
+ assertFalse(g.hasOwnProperty("arguments"));
+ assertThrows(function(){ f.arguments = 0 }, TypeError);
+ assertThrows(function(){ g.arguments = 0 }, TypeError);
+})();
+
+(function NoCaller() {
+ assertFalse(f.hasOwnProperty("caller"));
+ assertFalse(g.hasOwnProperty("caller"));
+ assertThrows(function(){ f.caller = 0 }, TypeError);
+ assertThrows(function(){ g.caller = 0 }, TypeError);
+})();
+
+(function NoCallee() {
+ assertFalse("callee" in f);
+ assertFalse("callee" in g);
+ assertThrows(function(){ f.callee = 0 }, TypeError);
+ assertThrows(function(){ g.callee = 0 }, TypeError);
+})();
-(function LexicalFunctionBindings(global) {
+(function LexicalBindings(global) {
+ assertEquals('function', typeof f);
assertEquals('function', typeof g);
+ assertEquals(undefined, global.f);
assertEquals(undefined, global.g);
})(this);
-(function ImmutableFunctionBindings() {
- function f() {}
- assertThrows(function(){ eval("g = 0") }, TypeError);
- assertThrows(function(){ eval("f = 0") }, TypeError);
- assertEquals('function', typeof g);
+(function ImmutableBindings() {
+ function f2() {}
+ function* g2() {}
+ assertThrows(function(){ f = 0 }, TypeError);
+ assertThrows(function(){ g = 0 }, TypeError);
+ assertThrows(function(){ f2 = 0 }, TypeError);
+ assertThrows(function(){ g2 = 0 }, TypeError);
assertEquals('function', typeof f);
+ assertEquals('function', typeof g);
+ assertEquals('function', typeof f2);
+ assertEquals('function', typeof g2);
+})();
+
+(function NonExtensible() {
+ assertThrows(function(){ f.a = 0 }, TypeError);
+ assertThrows(function(){ g.a = 0 }, TypeError);
+ assertThrows(function(){ Object.defineProperty(f, "a", {value: 0}) }, TypeError);
+ assertThrows(function(){ Object.defineProperty(g, "a", {value: 0}) }, TypeError);
+ assertThrows(function(){ Object.setPrototypeOf(f, {}) }, TypeError);
+ assertThrows(function(){ Object.setPrototypeOf(g, {}) }, TypeError);
+})();
+
+(function NoPrototype() {
+ assertFalse("prototype" in f);
+ assertFalse(g.hasOwnProperty("prototype"));
+ assertThrows(function(){ f.prototype = 0 }, TypeError);
+ assertThrows(function(){ g.prototype = 0 }, TypeError);
+ assertThrows(function(){ f.prototype.a = 0 }, TypeError);
+})();
+
+(function NonConstructor() {
+ assertThrows(function(){ new f }, TypeError);
+ assertThrows(function(){ new g }, TypeError);
})();
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 0d6baf0e22..a1254dbd09 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -61,6 +61,19 @@
# TODO(turbofan): Large switch statements crash.
'js1_5/Regress/regress-398085-01': [PASS, NO_VARIANTS],
+ ############################ INVALID TESTS #############################
+
+ # Function length properties are configurable in ES6
+ 'ecma/Array/15.4.4.3-1': [FAIL],
+ 'ecma/Array/15.4.4.4-1': [FAIL],
+ 'ecma/Array/15.4.4.4-2': [FAIL],
+ 'ecma/String/15.5.4.10-1': [FAIL],
+ 'ecma/String/15.5.4.11-1': [FAIL],
+ 'ecma/String/15.5.4.7-2': [FAIL],
+ 'ecma/String/15.5.4.8-1': [FAIL],
+ 'ecma/String/15.5.4.9-1': [FAIL],
+
+
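For context, the ES6 change these expectations track, as a hedged sketch that is not part of the patch: function `length` properties become configurable, so the legacy Mozilla assertions that `length` cannot be redefined now fail by design.

```js
function f(a, b) {}
// ES6 gives length the attributes:
// { writable: false, enumerable: false, configurable: true }
console.log(Object.getOwnPropertyDescriptor(f, 'length').configurable); // true
Object.defineProperty(f, 'length', { value: 0 }); // permitted in ES6
console.log(f.length); // 0
```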
##################### SKIPPED TESTS #####################
# This test checks that we behave properly in an out-of-memory
diff --git a/deps/v8/test/preparser/strict-function-statement.pyt b/deps/v8/test/preparser/strict-function-statement.pyt
deleted file mode 100644
index cc3d7bb582..0000000000
--- a/deps/v8/test/preparser/strict-function-statement.pyt
+++ /dev/null
@@ -1,109 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# In strict mode, function declarations may only appear as source elements.
-
-# A template that performs the same strict-mode test in different
-# scopes (global scope, function scope, and nested function scope).
-def StrictTest(name, source, legacy):
- if legacy:
- extra_flags = [
- "--noharmony-scoping",
- "--noharmony-classes",
- "--noharmony-object-literals"]
- else:
- extra_flags = []
- Test(name, '"use strict";\n' + source, "strict_function",
- extra_flags)
- Test(name + '-infunc',
- 'function foo() {\n "use strict";\n' + source +'\n}\n',
- "strict_function",
- extra_flags)
- Test(name + '-infunc2',
- 'function foo() {\n "use strict";\n function bar() {\n' +
- source +'\n }\n}\n',
- "strict_function",
- extra_flags)
-
-# Not testing with-scope, since with is not allowed in strict mode at all.
-
-StrictTest("block", """
- { function foo() { } }
-""", True)
-
-StrictTest("try-w-catch", """
- try { function foo() { } } catch (e) { }
-""", True)
-
-StrictTest("try-w-finally", """
- try { function foo() { } } finally { }
-""", True)
-
-StrictTest("catch", """
- try { } catch (e) { function foo() { } }
-""", True)
-
-StrictTest("finally", """
- try { } finally { function foo() { } }
-""", True)
-
-StrictTest("for", """
- for (;;) { function foo() { } }
-""", True)
-
-StrictTest("while", """
- while (true) { function foo() { } }
-""", True)
-
-StrictTest("do", """
- do { function foo() { } } while (true);
-""", True)
-
-StrictTest("then", """
- if (true) { function foo() { } }
-""", True)
-
-
-StrictTest("then-w-else", """
- if (true) { function foo() { } } else { }
-""", True)
-
-
-StrictTest("else", """
- if (true) { } else { function foo() { } }
-""", True)
-
-StrictTest("switch-case", """
- switch (true) { case true: function foo() { } }
-""", False)
-
-StrictTest("labeled", """
- label: function foo() { }
-""", False)
-
-
-
diff --git a/deps/v8/test/test262-es6/test262-es6.status b/deps/v8/test/test262-es6/test262-es6.status
index fd93f295fa..c004242ce3 100644
--- a/deps/v8/test/test262-es6/test262-es6.status
+++ b/deps/v8/test/test262-es6/test262-es6.status
@@ -30,119 +30,119 @@
############################### BUGS ###################################
# BUG(v8:3455)
- '11.2.3_b': [FAIL],
- '12.2.3_b': [FAIL],
+ 'intl402/ch11/11.2/11.2.3_b': [FAIL],
+ 'intl402/ch12/12.2/12.2.3_b': [FAIL],
# Unicode canonicalization is not available with i18n turned off.
- '15.5.4.9_CE': [['no_i18n', SKIP]],
+ 'ch15/15.5/15.5.4/15.5.4.9/15.5.4.9_CE': [['no_i18n', SKIP]],
###################### NEEDS INVESTIGATION #######################
# Possibly same cause as S8.5_A2.1, below: floating-point tests.
- 'S15.8.2.16_A7': [PASS, FAIL_OK],
- 'S15.8.2.18_A7': [PASS, FAIL_OK],
- 'S15.8.2.7_A7': [PASS, FAIL_OK],
+ 'ch15/15.8/15.8.2/15.8.2.16/S15.8.2.16_A7': [PASS, FAIL_OK],
+ 'ch15/15.8/15.8.2/15.8.2.18/S15.8.2.18_A7': [PASS, FAIL_OK],
+ 'ch15/15.8/15.8.2/15.8.2.7/S15.8.2.7_A7': [PASS, FAIL_OK],
# This is an incompatibility between ES5 and V8 on enumerating
# shadowed elements in a for..in loop.
# https://code.google.com/p/v8/issues/detail?id=705
- '12.6.4-2': [PASS, FAIL_OK],
+ 'ch12/12.6/12.6.4/12.6.4-2': [PASS, FAIL_OK],
###################### MISSING ES6 FEATURES #######################
# Array.fill (currently requires --harmony-arrays)
- 'S22.1.3.6_T1': [FAIL],
+ 'es6/ch22/22.1/22.1.3/S22.1.3.6_T1': [FAIL],
# Array.find (currently requires --harmony-arrays)
- 'S22.1.2.3_T1': [FAIL],
- 'S22.1.2.3_T2': [FAIL],
- 'Array.prototype.find_empty-array-undefined': [FAIL],
- 'Array.prototype.find_length-property': [FAIL],
- 'Array.prototype.find_modify-after-start': [FAIL],
- 'Array.prototype.find_non-returning-predicate': [FAIL],
- 'Array.prototype.find_predicate-arguments': [FAIL],
- 'Array.prototype.find_push-after-start': [FAIL],
- 'Array.prototype.find_remove-after-start': [FAIL],
- 'Array.prototype.find_return-found-value': [FAIL],
- 'Array.prototype.find_skip-empty': [FAIL],
- 'Array.prototype.find_this-defined': [FAIL],
- 'Array.prototype.find_this-is-object': [FAIL],
- 'Array.prototype.find_this-undefined': [FAIL],
+ 'es6/ch22/22.1/22.1.2/S22.1.2.3_T1': [FAIL],
+ 'es6/ch22/22.1/22.1.2/S22.1.2.3_T2': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_empty-array-undefined': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_length-property': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_modify-after-start': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_non-returning-predicate': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_predicate-arguments': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_push-after-start': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_remove-after-start': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_return-found-value': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_skip-empty': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_this-defined': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_this-is-object': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_this-undefined': [FAIL],
# Array.from
- 'S22.1.2.1_T1': [FAIL],
- 'S22.1.2.1_T2': [FAIL],
+ 'es6/ch22/22.1/22.1.2/S22.1.2.1_T1': [FAIL],
+ 'es6/ch22/22.1/22.1.2/S22.1.2.1_T2': [FAIL],
# Direct proxies
- 'Array.prototype.find_callable-predicate': [FAIL],
+ 'es6/Array.prototype.find/Array.prototype.find_callable-predicate': [FAIL],
######################## OBSOLETED BY ES6 ###########################
# ES6 allows duplicate properties
- '11.1.5-4-4-a-1-s': [FAIL],
- '11.1.5_4-4-b-1': [FAIL],
- '11.1.5_4-4-b-2': [FAIL],
- '11.1.5_4-4-c-1': [FAIL],
- '11.1.5_4-4-c-2': [FAIL],
- '11.1.5_4-4-d-1': [FAIL],
- '11.1.5_4-4-d-2': [FAIL],
- '11.1.5_4-4-d-3': [FAIL],
- '11.1.5_4-4-d-4': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5-4-4-a-1-s': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5_4-4-b-1': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5_4-4-b-2': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5_4-4-c-1': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5_4-4-c-2': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5_4-4-d-1': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5_4-4-d-2': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5_4-4-d-3': [FAIL],
+ 'ch11/11.1/11.1.5/11.1.5_4-4-d-4': [FAIL],
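The relaxation behind this group, sketched for reference (illustrative only): ES5 strict mode made a repeated property name in an object literal a SyntaxError, while ES6 drops the restriction and lets the last definition win.

```js
'use strict';
var obj = { x: 1, x: 2 }; // SyntaxError in ES5 strict mode; legal in ES6
console.log(obj.x); // 2 (the last definition wins)
```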
# ES6 does ToObject for Object.getOwnPropertyNames
- '15.2.3.4-1': [FAIL],
- '15.2.3.4-1-4': [FAIL],
- '15.2.3.4-1-5': [FAIL],
+ 'ch15/15.2/15.2.3/15.2.3.4/15.2.3.4-1': [FAIL],
+ 'ch15/15.2/15.2.3/15.2.3.4/15.2.3.4-1-4': [FAIL],
+ 'ch15/15.2/15.2.3/15.2.3.4/15.2.3.4-1-5': [FAIL],
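And the ToObject change, in one line (illustrative): ES5's Object.getOwnPropertyNames threw a TypeError for primitive arguments, whereas ES6 coerces the argument to an object first.

```js
// TypeError under ES5; under ES6 the string is boxed via ToObject first.
console.log(Object.getOwnPropertyNames('ab')); // ['0', '1', 'length']
```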
# ES6 allows block-local functions.
- 'Sbp_A1_T1': [FAIL],
- 'Sbp_A2_T1': [FAIL],
- 'Sbp_A2_T2': [FAIL],
- 'Sbp_A3_T1': [FAIL],
- 'Sbp_A3_T2': [FAIL],
- 'Sbp_A4_T1': [FAIL],
- 'Sbp_A4_T2': [FAIL],
- 'Sbp_A5_T1': [PASS], # Test is broken (strict reference to unbound variable)
- 'Sbp_A5_T2': [FAIL],
+ 'bestPractice/Sbp_A1_T1': [FAIL],
+ 'bestPractice/Sbp_A2_T1': [FAIL],
+ 'bestPractice/Sbp_A2_T2': [FAIL],
+ 'bestPractice/Sbp_A3_T1': [FAIL],
+ 'bestPractice/Sbp_A3_T2': [FAIL],
+ 'bestPractice/Sbp_A4_T1': [FAIL],
+ 'bestPractice/Sbp_A4_T2': [FAIL],
+ 'bestPractice/Sbp_A5_T1': [PASS], # Test is broken (strict reference to unbound variable)
+ 'bestPractice/Sbp_A5_T2': [FAIL],
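The Sbp tests encode the ES5 rule that strict mode rejects function declarations inside blocks. ES6 instead gives such declarations block scope, so these tests now fail by design. A minimal sketch, not part of the patch:

```js
'use strict';
{
  function foo() { return 1; } // SyntaxError in ES5 strict; block-scoped in ES6
}
console.log(typeof foo); // 'undefined' (foo does not leak out of the block)
```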
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
# to be either marked as bugs with issues filed for them or as deliberate
# incompatibilities if the test cases turn out to be broken or ambiguous.
- '6.2.3': [FAIL],
- '9.2.1_2': [FAIL],
- '9.2.6_2': [FAIL],
- '10.1.1_a': [FAIL],
- '10.1.1_19_c': [PASS, FAIL, NO_VARIANTS],
- '10.1.2.1_4': [FAIL],
- '10.2.3_b': [PASS, FAIL],
- '10.3_a': [FAIL],
- '11.1.1_17': [PASS, FAIL],
- '11.1.1_19': [PASS, FAIL],
- '11.1.1_20_c': [FAIL],
- '11.1.1_a': [FAIL],
- '11.1.2.1_4': [FAIL],
- '11.3.2_FN_2': [PASS, FAIL],
- '11.3.2_TRF': [PASS, FAIL],
- '11.3_a': [FAIL],
- '12.1.1_a': [FAIL],
- '12.1.2.1_4': [FAIL],
- '12.3.2_FDT_7_a_iv': [FAIL],
- '12.3.3': [FAIL],
- '12.3_a': [FAIL],
- '15.5.4.9_3': [PASS, FAIL],
+ 'intl402/ch06/6.2/6.2.3': [FAIL],
+ 'intl402/ch09/9.2/9.2.1_2': [FAIL],
+ 'intl402/ch09/9.2/9.2.6_2': [FAIL],
+ 'intl402/ch10/10.1/10.1.1_a': [FAIL],
+ 'intl402/ch10/10.1/10.1.1_19_c': [PASS, FAIL, NO_VARIANTS],
+ 'intl402/ch10/10.1/10.1.2.1_4': [FAIL],
+ 'intl402/ch10/10.2/10.2.3_b': [PASS, FAIL],
+ 'intl402/ch10/10.3/10.3_a': [FAIL],
+ 'intl402/ch11/11.1/11.1.1_17': [PASS, FAIL],
+ 'intl402/ch11/11.1/11.1.1_19': [PASS, FAIL],
+ 'intl402/ch11/11.1/11.1.1_20_c': [FAIL],
+ 'intl402/ch11/11.1/11.1.1_a': [FAIL],
+ 'intl402/ch11/11.1/11.1.2.1_4': [FAIL],
+ 'intl402/ch11/11.3/11.3.2_FN_2': [PASS, FAIL],
+ 'intl402/ch11/11.3/11.3.2_TRF': [PASS, FAIL],
+ 'intl402/ch11/11.3/11.3_a': [FAIL],
+ 'intl402/ch12/12.1/12.1.1_a': [FAIL],
+ 'intl402/ch12/12.1/12.1.2.1_4': [FAIL],
+ 'intl402/ch12/12.3/12.3.2_FDT_7_a_iv': [FAIL],
+ 'intl402/ch12/12.3/12.3.3': [FAIL],
+ 'intl402/ch12/12.3/12.3_a': [FAIL],
+ 'intl402/ch15/15.5/15.5.4/15.5.4.9/15.5.4.9_3': [PASS, FAIL],
##################### DELIBERATE INCOMPATIBILITIES #####################
- 'S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
+ 'ch15/15.8/15.8.2/15.8.2.8/S15.8.2.8_A6': [PASS, FAIL_OK], # Math.exp (less precise with --fast-math)
# Linux for ia32 (and therefore simulators) default to extended 80 bit
# floating point formats, so these tests checking 64-bit FP precision fail.
# The other platforms/arch's pass these tests.
# We follow the other major JS engines by keeping this default.
- 'S8.5_A2.1': [PASS, FAIL_OK],
- 'S8.5_A2.2': [PASS, FAIL_OK],
+ 'ch08/8.5/S8.5_A2.1': [PASS, FAIL_OK],
+ 'ch08/8.5/S8.5_A2.2': [PASS, FAIL_OK],
############################ INVALID TESTS #############################
@@ -150,55 +150,181 @@
# tests in PST/PDT between first Sunday in March and first Sunday in April.
# The DST switch was moved in 2007 whereas Test262 bases the reference value
# on 2000. Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
- 'S15.9.3.1_A5_T1': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T2': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T3': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T4': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T5': [PASS, FAIL_OK],
- 'S15.9.3.1_A5_T6': [PASS, FAIL_OK],
+ 'ch15/15.9/15.9.3/S15.9.3.1_A5_T1': [PASS, FAIL_OK],
+ 'ch15/15.9/15.9.3/S15.9.3.1_A5_T2': [PASS, FAIL_OK],
+ 'ch15/15.9/15.9.3/S15.9.3.1_A5_T3': [PASS, FAIL_OK],
+ 'ch15/15.9/15.9.3/S15.9.3.1_A5_T4': [PASS, FAIL_OK],
+ 'ch15/15.9/15.9.3/S15.9.3.1_A5_T5': [PASS, FAIL_OK],
+ 'ch15/15.9/15.9.3/S15.9.3.1_A5_T6': [PASS, FAIL_OK],
# Test makes unjustified assumptions about the number of calls to SortCompare.
# Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
- 'bug_596_1': [PASS, FAIL_OK],
+ 'es6/bug_596_1': [PASS, FAIL_OK],
# Tests do not return boolean.
- '15.2.3.14-1-1': [PASS, FAIL_OK],
- '15.2.3.14-1-2': [PASS, FAIL_OK],
- '15.2.3.14-1-3': [PASS, FAIL_OK],
+ 'ch15/15.2/15.2.3/15.2.3.14/15.2.3.14-1-1': [PASS, FAIL_OK],
+ 'ch15/15.2/15.2.3/15.2.3.14/15.2.3.14-1-2': [PASS, FAIL_OK],
+ 'ch15/15.2/15.2.3/15.2.3.14/15.2.3.14-1-3': [PASS, FAIL_OK],
# String.prototype.contains renamed to 'S.p.includes'
- 'String.prototype.contains_FailBadLocation' : [FAIL_OK],
- 'String.prototype.contains_FailLocation' : [FAIL_OK],
- 'String.prototype.contains_FailMissingLetter' : [FAIL_OK],
- 'String.prototype.contains_lengthProp' : [FAIL_OK],
- 'String.prototype.contains_Success' : [FAIL_OK],
- 'String.prototype.contains_SuccessNoLocation' : [FAIL_OK],
-
+ 'es6/String.prototype.contains/String.prototype.contains_FailBadLocation' : [FAIL_OK],
+ 'es6/String.prototype.contains/String.prototype.contains_FailLocation' : [FAIL_OK],
+ 'es6/String.prototype.contains/String.prototype.contains_FailMissingLetter' : [FAIL_OK],
+ 'es6/String.prototype.contains/String.prototype.contains_lengthProp' : [FAIL_OK],
+ 'es6/String.prototype.contains/String.prototype.contains_Success' : [FAIL_OK],
+ 'es6/String.prototype.contains/String.prototype.contains_SuccessNoLocation' : [FAIL_OK],
+
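Context for the contains entries above, as a hedged sketch: the ES6 draft method String.prototype.contains shipped under the final name includes, so tests written against the draft name are expected failures.

```js
console.log('hello'.includes('ell')); // true, under the final ES6 name
console.log(typeof ''.contains);      // 'undefined' in engines tracking final ES6
```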
+ # Function length properties are configurable in ES6
+ 'ch11/11.4/11.4.1/11.4.1-5-a-28-s': [FAIL],
+ 'ch13/13.2/13.2-15-1': [FAIL],
+ 'ch15/15.1/15.1.2/15.1.2.1/S15.1.2.1_A4.2': [FAIL],
+ 'ch15/15.1/15.1.2/15.1.2.2/S15.1.2.2_A9.2': [FAIL],
+ 'ch15/15.1/15.1.2/15.1.2.3/S15.1.2.3_A7.2': [FAIL],
+ 'ch15/15.1/15.1.2/15.1.2.4/S15.1.2.4_A2.2': [FAIL],
+ 'ch15/15.1/15.1.2/15.1.2.5/S15.1.2.5_A2.2': [FAIL],
+ 'ch15/15.1/15.1.3/15.1.3.1/S15.1.3.1_A5.2': [FAIL],
+ 'ch15/15.1/15.1.3/15.1.3.2/S15.1.3.2_A5.2': [FAIL],
+ 'ch15/15.1/15.1.3/15.1.3.3/S15.1.3.3_A5.2': [FAIL],
+ 'ch15/15.1/15.1.3/15.1.3.4/S15.1.3.4_A5.2': [FAIL],
+ 'ch15/15.10/15.10.6/15.10.6.2/S15.10.6.2_A9': [FAIL],
+ 'ch15/15.10/15.10.6/15.10.6.3/S15.10.6.3_A9': [FAIL],
+ 'ch15/15.10/15.10.6/15.10.6.4/S15.10.6.4_A9': [FAIL],
+ 'ch15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-186': [FAIL],
+ 'ch15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-187': [FAIL],
+ 'ch15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-191': [FAIL],
+ 'ch15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-194': [FAIL],
+ 'ch15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-201': [FAIL],
+ 'ch15/15.2/15.2.4/15.2.4.2/S15.2.4.2_A9': [FAIL],
+ 'ch15/15.2/15.2.4/15.2.4.3/S15.2.4.3_A9': [FAIL],
+ 'ch15/15.2/15.2.4/15.2.4.4/S15.2.4.4_A9': [FAIL],
+ 'ch15/15.2/15.2.4/15.2.4.5/S15.2.4.5_A9': [FAIL],
+ 'ch15/15.2/15.2.4/15.2.4.6/S15.2.4.6_A9': [FAIL],
+ 'ch15/15.2/15.2.4/15.2.4.7/S15.2.4.7_A9': [FAIL],
+ 'ch15/15.3/15.3.3/15.3.3.2/15.3.3.2-1': [FAIL],
+ 'ch15/15.3/15.3.4/15.3.4.2/S15.3.4.2_A9': [FAIL],
+ 'ch15/15.3/15.3.4/15.3.4.3/S15.3.4.3_A9': [FAIL],
+ 'ch15/15.3/15.3.4/15.3.4.4/S15.3.4.4_A9': [FAIL],
+ 'ch15/15.3/15.3.5/S15.3.5.1_A2_T1': [FAIL],
+ 'ch15/15.3/15.3.5/S15.3.5.1_A2_T2': [FAIL],
+ 'ch15/15.3/15.3.5/S15.3.5.1_A2_T3': [FAIL],
+ 'ch15/15.4/15.4.3/S15.4.3_A2.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.10/S15.4.4.10_A5.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.11/S15.4.4.11_A7.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.12/S15.4.4.12_A5.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.13/S15.4.4.13_A5.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.2/S15.4.4.2_A4.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.3/S15.4.4.3_A4.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.4/S15.4.4.4_A4.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.5/S15.4.4.5_A6.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.6/S15.4.4.6_A5.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.7/S15.4.4.7_A6.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.8/S15.4.4.8_A5.2': [FAIL],
+ 'ch15/15.4/15.4.4/15.4.4.9/S15.4.4.9_A5.2': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.10/S15.5.4.10_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.11/S15.5.4.11_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.12/S15.5.4.12_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.13/S15.5.4.13_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.14/S15.5.4.14_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.15/S15.5.4.15_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.16/S15.5.4.16_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.17/S15.5.4.17_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.18/S15.5.4.18_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.19/S15.5.4.19_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.4/S15.5.4.4_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.5/S15.5.4.5_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.6/S15.5.4.6_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.7/S15.5.4.7_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.8/S15.5.4.8_A9': [FAIL],
+ 'ch15/15.5/15.5.4/15.5.4.9/S15.5.4.9_A9': [FAIL],
+ 'ch15/15.9/15.9.4/15.9.4.2/S15.9.4.2_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.4/15.9.4.3/S15.9.4.3_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.1/S15.9.5.1_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.10/S15.9.5.10_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.11/S15.9.5.11_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.12/S15.9.5.12_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.13/S15.9.5.13_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.14/S15.9.5.14_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.15/S15.9.5.15_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.16/S15.9.5.16_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.17/S15.9.5.17_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.18/S15.9.5.18_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.19/S15.9.5.19_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.2/S15.9.5.2_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.20/S15.9.5.20_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.21/S15.9.5.21_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.22/S15.9.5.22_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.23/S15.9.5.23_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.24/S15.9.5.24_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.25/S15.9.5.25_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.26/S15.9.5.26_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.27/S15.9.5.27_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.28/S15.9.5.28_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.29/S15.9.5.29_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.3/S15.9.5.3_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.30/S15.9.5.30_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.31/S15.9.5.31_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.32/S15.9.5.32_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.33/S15.9.5.33_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.34/S15.9.5.34_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.35/S15.9.5.35_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.36/S15.9.5.36_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.37/S15.9.5.37_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.38/S15.9.5.38_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.39/S15.9.5.39_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.4/S15.9.5.4_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.40/S15.9.5.40_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.41/S15.9.5.41_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.42/S15.9.5.42_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.5/S15.9.5.5_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.6/S15.9.5.6_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.7/S15.9.5.7_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.8/S15.9.5.8_A3_T2': [FAIL],
+ 'ch15/15.9/15.9.5/15.9.5.9/S15.9.5.9_A3_T2': [FAIL],
+ 'intl402/ch10/10.1/10.1_L15': [FAIL],
+ 'intl402/ch10/10.2/10.2.2_L15': [FAIL],
+ 'intl402/ch10/10.3/10.3.2_1_a_L15': [FAIL],
+ 'intl402/ch10/10.3/10.3.2_L15': [FAIL],
+ 'intl402/ch10/10.3/10.3.3_L15': [FAIL],
+ 'intl402/ch11/11.1/11.1_L15': [FAIL],
+ 'intl402/ch11/11.2/11.2.2_L15': [FAIL],
+ 'intl402/ch11/11.3/11.3.2_1_a_L15': [FAIL],
+ 'intl402/ch11/11.3/11.3.2_L15': [FAIL],
+ 'intl402/ch11/11.3/11.3.3_L15': [FAIL],
+ 'intl402/ch12/12.1/12.1_L15': [FAIL],
+ 'intl402/ch12/12.2/12.2.2_L15': [FAIL],
+ 'intl402/ch12/12.3/12.3.2_1_a_L15': [FAIL],
+ 'intl402/ch12/12.3/12.3.2_L15': [FAIL],
+ 'intl402/ch12/12.3/12.3.3_L15': [FAIL],
+ 'intl402/ch13/13.1/13.1.1_L15': [FAIL],
+ 'intl402/ch13/13.2/13.2.1_L15': [FAIL],
+ 'intl402/ch13/13.3/13.3.1_L15': [FAIL],
+ 'intl402/ch13/13.3/13.3.2_L15': [FAIL],
+ 'intl402/ch13/13.3/13.3.3_L15': [FAIL],
############################ SKIPPED TESTS #############################
# These tests take a looong time to run in debug mode.
- 'S15.1.3.1_A2.5_T1': [PASS, ['mode == debug', SKIP]],
- 'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+ 'ch15/15.1/15.1.3/15.1.3.1/S15.1.3.1_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+ 'ch15/15.1/15.1.3/15.1.3.2/S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
}], # ALWAYS
['system == macos', {
- '11.3.2_TRP': [FAIL],
- '9.2.5_11_g_ii_2': [FAIL],
+ 'intl402/ch11/11.3/11.3.2_TRP': [FAIL],
+ 'intl402/ch09/9.2/9.2.5_11_g_ii_2': [FAIL],
}], # system == macos
['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
# TODO(mstarzinger): Causes stack overflow on simulators due to eager
# compilation of parenthesized function literals. Needs investigation.
- 'S13.2.1_A1_T1': [SKIP],
+ 'ch13/13.2/S13.2.1_A1_T1': [SKIP],
# BUG(3251225): Tests that timeout with --nocrankshaft.
- 'S15.1.3.1_A2.4_T1': [SKIP],
- 'S15.1.3.1_A2.5_T1': [SKIP],
- 'S15.1.3.2_A2.4_T1': [SKIP],
- 'S15.1.3.2_A2.5_T1': [SKIP],
- 'S15.1.3.3_A2.3_T1': [SKIP],
- 'S15.1.3.4_A2.3_T1': [SKIP],
+ 'ch15/15.1/15.1.3/15.1.3.1/S15.1.3.1_A2.4_T1': [SKIP],
+ 'ch15/15.1/15.1.3/15.1.3.1/S15.1.3.1_A2.5_T1': [SKIP],
+ 'ch15/15.1/15.1.3/15.1.3.2/S15.1.3.2_A2.4_T1': [SKIP],
+ 'ch15/15.1/15.1.3/15.1.3.2/S15.1.3.2_A2.5_T1': [SKIP],
+ 'ch15/15.1/15.1.3/15.1.3.3/S15.1.3.3_A2.3_T1': [SKIP],
+ 'ch15/15.1/15.1.3/15.1.3.4/S15.1.3.4_A2.3_T1': [SKIP],
}], # 'arch == arm or arch == mipsel or arch == mips or arch == arm64'
]
diff --git a/deps/v8/test/test262-es6/testcfg.py b/deps/v8/test/test262-es6/testcfg.py
index 0a894104a2..cb44da073a 100644
--- a/deps/v8/test/test262-es6/testcfg.py
+++ b/deps/v8/test/test262-es6/testcfg.py
@@ -57,9 +57,6 @@ class Test262TestSuite(testsuite.TestSuite):
self.harness += [os.path.join(self.root, "harness-adapt.js")]
self.ParseTestRecord = None
- def CommonTestName(self, testcase):
- return testcase.path.split(os.path.sep)[-1]
-
def ListTests(self, context):
tests = []
for dirname, dirs, files in os.walk(self.testroot):
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index d1800c5fc5..8e7496bc25 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -62,6 +62,133 @@
'11.1.5_4-4-d-3': [FAIL],
'11.1.5_4-4-d-4': [FAIL],
+ # Function length properties are configurable in ES6
+ '10.1_L15': [FAIL],
+ '10.2.2_L15': [FAIL],
+ '10.3.2_1_a_L15': [FAIL],
+ '10.3.2_L15': [FAIL],
+ '10.3.3_L15': [FAIL],
+ '11.1_L15': [FAIL],
+ '11.2.2_L15': [FAIL],
+ '11.3.2_1_a_L15': [FAIL],
+ '11.3.2_L15': [FAIL],
+ '11.3.3_L15': [FAIL],
+ '11.4.1-5-a-28-s': [FAIL],
+ '12.1_L15': [FAIL],
+ '12.2.2_L15': [FAIL],
+ '12.3.2_1_a_L15': [FAIL],
+ '12.3.2_L15': [FAIL],
+ '12.3.3_L15': [FAIL],
+ '13.1.1_L15': [FAIL],
+ '13.2-15-1': [FAIL],
+ '13.2.1_L15': [FAIL],
+ '13.3.1_L15': [FAIL],
+ '13.3.2_L15': [FAIL],
+ '13.3.3_L15': [FAIL],
+ '15.2.3.3-4-186': [FAIL],
+ '15.2.3.3-4-187': [FAIL],
+ '15.2.3.3-4-191': [FAIL],
+ '15.2.3.3-4-194': [FAIL],
+ '15.2.3.3-4-201': [FAIL],
+ '15.3.3.2-1': [FAIL],
+ 'S15.1.2.1_A4.2': [FAIL],
+ 'S15.1.2.2_A9.2': [FAIL],
+ 'S15.1.2.3_A7.2': [FAIL],
+ 'S15.1.2.4_A2.2': [FAIL],
+ 'S15.1.2.5_A2.2': [FAIL],
+ 'S15.1.3.1_A5.2': [FAIL],
+ 'S15.1.3.2_A5.2': [FAIL],
+ 'S15.1.3.3_A5.2': [FAIL],
+ 'S15.1.3.4_A5.2': [FAIL],
+ 'S15.10.6.2_A9': [FAIL],
+ 'S15.10.6.3_A9': [FAIL],
+ 'S15.10.6.4_A9': [FAIL],
+ 'S15.2.4.2_A9': [FAIL],
+ 'S15.2.4.3_A9': [FAIL],
+ 'S15.2.4.4_A9': [FAIL],
+ 'S15.2.4.5_A9': [FAIL],
+ 'S15.2.4.6_A9': [FAIL],
+ 'S15.2.4.7_A9': [FAIL],
+ 'S15.3.4.2_A9': [FAIL],
+ 'S15.3.4.3_A9': [FAIL],
+ 'S15.3.4.4_A9': [FAIL],
+ 'S15.3.5.1_A2_T1': [FAIL],
+ 'S15.3.5.1_A2_T2': [FAIL],
+ 'S15.3.5.1_A2_T3': [FAIL],
+ 'S15.4.3_A2.2': [FAIL],
+ 'S15.4.4.10_A5.2': [FAIL],
+ 'S15.4.4.11_A7.2': [FAIL],
+ 'S15.4.4.12_A5.2': [FAIL],
+ 'S15.4.4.13_A5.2': [FAIL],
+ 'S15.4.4.2_A4.2': [FAIL],
+ 'S15.4.4.3_A4.2': [FAIL],
+ 'S15.4.4.4_A4.2': [FAIL],
+ 'S15.4.4.5_A6.2': [FAIL],
+ 'S15.4.4.6_A5.2': [FAIL],
+ 'S15.4.4.7_A6.2': [FAIL],
+ 'S15.4.4.8_A5.2': [FAIL],
+ 'S15.4.4.9_A5.2': [FAIL],
+ 'S15.5.4.10_A9': [FAIL],
+ 'S15.5.4.11_A9': [FAIL],
+ 'S15.5.4.12_A9': [FAIL],
+ 'S15.5.4.13_A9': [FAIL],
+ 'S15.5.4.14_A9': [FAIL],
+ 'S15.5.4.15_A9': [FAIL],
+ 'S15.5.4.16_A9': [FAIL],
+ 'S15.5.4.17_A9': [FAIL],
+ 'S15.5.4.18_A9': [FAIL],
+ 'S15.5.4.19_A9': [FAIL],
+ 'S15.5.4.4_A9': [FAIL],
+ 'S15.5.4.5_A9': [FAIL],
+ 'S15.5.4.6_A9': [FAIL],
+ 'S15.5.4.7_A9': [FAIL],
+ 'S15.5.4.8_A9': [FAIL],
+ 'S15.5.4.9_A9': [FAIL],
+ 'S15.9.4.2_A3_T2': [FAIL],
+ 'S15.9.4.3_A3_T2': [FAIL],
+ 'S15.9.5.10_A3_T2': [FAIL],
+ 'S15.9.5.11_A3_T2': [FAIL],
+ 'S15.9.5.12_A3_T2': [FAIL],
+ 'S15.9.5.13_A3_T2': [FAIL],
+ 'S15.9.5.14_A3_T2': [FAIL],
+ 'S15.9.5.15_A3_T2': [FAIL],
+ 'S15.9.5.16_A3_T2': [FAIL],
+ 'S15.9.5.17_A3_T2': [FAIL],
+ 'S15.9.5.18_A3_T2': [FAIL],
+ 'S15.9.5.19_A3_T2': [FAIL],
+ 'S15.9.5.1_A3_T2': [FAIL],
+ 'S15.9.5.20_A3_T2': [FAIL],
+ 'S15.9.5.21_A3_T2': [FAIL],
+ 'S15.9.5.22_A3_T2': [FAIL],
+ 'S15.9.5.23_A3_T2': [FAIL],
+ 'S15.9.5.24_A3_T2': [FAIL],
+ 'S15.9.5.25_A3_T2': [FAIL],
+ 'S15.9.5.26_A3_T2': [FAIL],
+ 'S15.9.5.27_A3_T2': [FAIL],
+ 'S15.9.5.28_A3_T2': [FAIL],
+ 'S15.9.5.29_A3_T2': [FAIL],
+ 'S15.9.5.2_A3_T2': [FAIL],
+ 'S15.9.5.30_A3_T2': [FAIL],
+ 'S15.9.5.31_A3_T2': [FAIL],
+ 'S15.9.5.32_A3_T2': [FAIL],
+ 'S15.9.5.33_A3_T2': [FAIL],
+ 'S15.9.5.34_A3_T2': [FAIL],
+ 'S15.9.5.35_A3_T2': [FAIL],
+ 'S15.9.5.36_A3_T2': [FAIL],
+ 'S15.9.5.37_A3_T2': [FAIL],
+ 'S15.9.5.38_A3_T2': [FAIL],
+ 'S15.9.5.39_A3_T2': [FAIL],
+ 'S15.9.5.3_A3_T2': [FAIL],
+ 'S15.9.5.40_A3_T2': [FAIL],
+ 'S15.9.5.41_A3_T2': [FAIL],
+ 'S15.9.5.42_A3_T2': [FAIL],
+ 'S15.9.5.4_A3_T2': [FAIL],
+ 'S15.9.5.5_A3_T2': [FAIL],
+ 'S15.9.5.6_A3_T2': [FAIL],
+ 'S15.9.5.7_A3_T2': [FAIL],
+ 'S15.9.5.8_A3_T2': [FAIL],
+ 'S15.9.5.9_A3_T2': [FAIL],
+
######################## NEEDS INVESTIGATION ###########################
# These test failures are specific to the intl402 suite and need investigation
diff --git a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 1fa0b10842..85e52488b4 100644
--- a/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -2426,6 +2426,21 @@ TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmClz, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
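For orientation, Word32Clz is the machine-level count-leading-zeros operation that, among other things, backs ES6's Math.clz32; kArmClz corresponds to the ARM CLZ instruction. The semantics being selected for, as a quick illustration:

```js
console.log(Math.clz32(1));          // 31 (only bit 0 is set)
console.log(Math.clz32(0x80000000)); // 0  (the top bit is set)
console.log(Math.clz32(0));          // 32 (no bits set)
```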
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 571dbecd14..02c8d2e06d 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -472,6 +472,36 @@ TEST_P(InstructionSelectorAddSubTest, ShiftByImmediateOnRight) {
}
+TEST_P(InstructionSelectorAddSubTest, ExtendByte) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.mi.constructor)(
+ m.Parameter(0), m.Word32And(m.Parameter(1), m.Int32Constant(0xff))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, ExtendHalfword) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.mi.constructor)(
+ m.Parameter(0), m.Word32And(m.Parameter(1), m.Int32Constant(0xffff))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
::testing::ValuesIn(kAddSubInstructions));
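These parameterized cases check that the selector folds a zero-extending mask into the add/sub operand itself: `x & 0xff` becomes a UXTB (unsigned extend byte) operand and `x & 0xffff` a UXTH (extend halfword), saving a separate AND instruction. In source terms, the pattern being matched is simply (illustrative):

```js
function addExtendByte(a, b) {
  return (a + (b & 0xff)) | 0; // the AND folds into the add as UXTB on arm64
}
```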
@@ -616,6 +646,58 @@ TEST_F(InstructionSelectorTest, AddShiftByImmediateOnLeft) {
}
+TEST_F(InstructionSelectorTest, AddExtendByteOnLeft) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xff)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt32, kMachInt64);
+ m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xff)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, AddExtendHalfwordOnLeft) {
+ {
+ StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xffff)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, kMachInt64, kMachInt32, kMachInt64);
+ m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xffff)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Data processing controlled branches.
@@ -896,26 +978,6 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
}
-
- TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1L << bit;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
- m.Branch(
- m.Word64BinaryNot(m.Word64And(m.Parameter(0), m.Int64Constant(mask))),
- &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
- }
}
@@ -937,26 +999,6 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
}
-
- TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1L << bit;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
- m.Branch(
- m.Word64BinaryNot(m.Word64And(m.Int64Constant(mask), m.Parameter(0))),
- &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
- }
}
@@ -2200,6 +2242,21 @@ TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Clz32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
index 5b31f5e04c..5cfb8fdc41 100644
--- a/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/change-lowering-unittest.cc
@@ -23,45 +23,15 @@ namespace v8 {
namespace internal {
namespace compiler {
-class ChangeLoweringTest : public GraphTest {
+class ChangeLoweringTest : public TypedGraphTest {
public:
ChangeLoweringTest() : simplified_(zone()) {}
- ~ChangeLoweringTest() OVERRIDE {}
virtual MachineType WordRepresentation() const = 0;
protected:
- int HeapNumberValueOffset() const {
- STATIC_ASSERT(HeapNumber::kValueOffset % kApiPointerSize == 0);
- return (HeapNumber::kValueOffset / kApiPointerSize) * PointerSize() -
- kHeapObjectTag;
- }
bool Is32() const { return WordRepresentation() == kRepWord32; }
- int PointerSize() const {
- switch (WordRepresentation()) {
- case kRepWord32:
- return 4;
- case kRepWord64:
- return 8;
- default:
- break;
- }
- UNREACHABLE();
- return 0;
- }
- int SmiMaxValue() const { return -(SmiMinValue() + 1); }
- int SmiMinValue() const {
- return static_cast<int>(0xffffffffu << (SmiValueSize() - 1));
- }
- int SmiShiftAmount() const { return kSmiTagSize + SmiShiftSize(); }
- int SmiShiftSize() const {
- return Is32() ? SmiTagging<4>::SmiShiftSize()
- : SmiTagging<8>::SmiShiftSize();
- }
- int SmiValueSize() const {
- return Is32() ? SmiTagging<4>::SmiValueSize()
- : SmiTagging<8>::SmiValueSize();
- }
+ bool Is64() const { return WordRepresentation() == kRepWord64; }
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone(), WordRepresentation());
@@ -80,15 +50,33 @@ class ChangeLoweringTest : public GraphTest {
IsNumberConstant(BitEq(0.0)), effect_matcher,
control_matcher);
}
+ Matcher<Node*> IsChangeInt32ToSmi(const Matcher<Node*>& value_matcher) {
+ return Is64() ? IsWord64Shl(IsChangeInt32ToInt64(value_matcher),
+ IsSmiShiftBitsConstant())
+ : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
+ }
+ Matcher<Node*> IsChangeSmiToInt32(const Matcher<Node*>& value_matcher) {
+ return Is64() ? IsTruncateInt64ToInt32(
+ IsWord64Sar(value_matcher, IsSmiShiftBitsConstant()))
+ : IsWord32Sar(value_matcher, IsSmiShiftBitsConstant());
+ }
+ Matcher<Node*> IsChangeUint32ToSmi(const Matcher<Node*>& value_matcher) {
+ return Is64() ? IsWord64Shl(IsChangeUint32ToUint64(value_matcher),
+ IsSmiShiftBitsConstant())
+ : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
+ }
Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher) {
return IsLoad(kMachFloat64, value_matcher,
- IsIntPtrConstant(HeapNumberValueOffset()), graph()->start(),
- control_matcher);
+ IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
+ graph()->start(), control_matcher);
}
Matcher<Node*> IsIntPtrConstant(int value) {
return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
}
+ Matcher<Node*> IsSmiShiftBitsConstant() {
+ return IsIntPtrConstant(kSmiShiftSize + kSmiTagSize);
+ }
Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
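The new matchers encode V8's Smi tagging scheme: a 32-bit integer is stored shifted left by kSmiTagSize + kSmiShiftSize bits, which is 1 on 32-bit targets and 32 on 64-bit targets (where the payload occupies the upper word). A hedged sketch of the 64-bit arithmetic, using BigInt to stand in for machine words; the helper names are invented for illustration:

```js
const SMI_SHIFT_64 = 32n; // kSmiTagSize (1) + kSmiShiftSize (31) on 64-bit

function changeInt32ToSmi(value) {
  // Word64Shl(ChangeInt32ToInt64(value), 32): payload lands in the upper word.
  return BigInt.asIntN(64, BigInt(value) << SMI_SHIFT_64);
}

function changeSmiToInt32(smi) {
  // TruncateInt64ToInt32(Word64Sar(smi, 32)): arithmetic shift back down.
  return Number(BigInt.asIntN(32, smi >> SMI_SHIFT_64));
}

console.log(changeSmiToInt32(changeInt32ToSmi(-42))); // -42
```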
@@ -115,51 +103,95 @@ class ChangeLoweringCommonTest
TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeBitToBool(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsSelect(static_cast<MachineType>(kTypeBool | kRepTagged), val,
- IsTrueConstant(), IsFalseConstant()));
+ Node* value = Parameter(Type::Boolean());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeBitToBool(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSelect(kMachAnyTagged, value, IsTrueConstant(),
+ IsFalseConstant()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- EXPECT_THAT(reduction.replacement(), IsWordEqual(val, IsTrueConstant()));
+ Node* value = Parameter(Type::Number());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsWordEqual(value, IsTrueConstant()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeFloat64ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* finish = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeFloat64ToTagged(), value));
+ ASSERT_TRUE(r.Changed());
Capture<Node*> heap_number;
EXPECT_THAT(
- finish,
+ r.replacement(),
IsFinish(
AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
+ IsAllocateHeapNumber(IsValueEffect(value), graph()->start())),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number),
- IsIntPtrConstant(HeapNumberValueOffset()), val,
- CaptureEq(&heap_number), graph()->start())));
+ IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
+ value, CaptureEq(&heap_number), graph()->start())));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeInt32ToTaggedWithSignedSmall) {
+ Node* value = Parameter(Type::SignedSmall());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeInt32ToTagged(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeInt32ToSmi(value));
}
-TARGET_TEST_P(ChangeLoweringCommonTest, StringAdd) {
- Node* node =
- graph()->NewNode(simplified()->StringAdd(), Parameter(0), Parameter(1));
- Reduction reduction = Reduce(node);
- EXPECT_FALSE(reduction.Changed());
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeUint32ToTaggedWithUnsignedSmall) {
+ Node* value = Parameter(Type::UnsignedSmall());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeUint32ToSmi(value));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToInt32WithTaggedSigned) {
+ Node* value = Parameter(Type::TaggedSigned());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeSmiToInt32(value));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToInt32WithTaggedPointer) {
+ Node* value = Parameter(Type::TaggedPointer());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeFloat64ToInt32(
+ IsLoadHeapNumber(value, graph()->start())));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedSigned) {
+ Node* value = Parameter(Type::TaggedSigned());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeSmiToInt32(value));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedPointer) {
+ Node* value = Parameter(Type::TaggedPointer());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeFloat64ToUint32(
+ IsLoadHeapNumber(value, graph()->start())));
}
@@ -179,26 +211,24 @@ class ChangeLowering32Test : public ChangeLoweringTest {
TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- NodeProperties::SetBounds(val, Bounds(Type::None(), Type::Signed32()));
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Integral32());
+ Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> add, branch, heap_number, if_true;
EXPECT_THAT(
- phi,
+ r.replacement(),
IsPhi(kMachAnyTagged,
IsFinish(AllOf(CaptureEq(&heap_number),
IsAllocateHeapNumber(_, CaptureEq(&if_true))),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number),
- IsIntPtrConstant(HeapNumberValueOffset()),
- IsChangeInt32ToFloat64(val),
+ IsIntPtrConstant(HeapNumber::kValueOffset -
+ kHeapObjectTag),
+ IsChangeInt32ToFloat64(value),
CaptureEq(&heap_number), CaptureEq(&if_true))),
- IsProjection(
- 0, AllOf(CaptureEq(&add), IsInt32AddWithOverflow(val, val))),
+ IsProjection(0, AllOf(CaptureEq(&add),
+ IsInt32AddWithOverflow(value, value))),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
IsIfFalse(AllOf(CaptureEq(&branch),
IsBranch(IsProjection(1, CaptureEq(&add)),
@@ -206,43 +236,27 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
}
-TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTaggedSmall) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- NodeProperties::SetBounds(val, Bounds(Type::None(), Type::Signed31()));
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* change = reduction.replacement();
- Capture<Node*> add, branch, heap_number, if_true;
- EXPECT_THAT(change, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())));
-}
-
-
TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(
- kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
- IsChangeInt32ToFloat64(
- IsWord32Sar(val, IsInt32Constant(SmiShiftAmount()))),
- IsMerge(
- AllOf(CaptureEq(&if_true),
- IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
- graph()->start())))),
- IsIfFalse(CaptureEq(&branch)))));
+ r.replacement(),
+ IsPhi(kMachFloat64, IsLoadHeapNumber(value, CaptureEq(&if_true)),
+ IsChangeInt32ToFloat64(IsWord32Sar(
+ value, IsInt32Constant(kSmiTagSize + kSmiShiftSize))),
+ IsMerge(AllOf(CaptureEq(&if_true),
+ IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(
+ value, IsInt32Constant(kSmiTagMask)),
+ graph()->start())))),
+ IsIfFalse(CaptureEq(&branch)))));
}
@@ -250,23 +264,22 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Signed32());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachInt32,
- IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
- IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
- graph()->start()))))));
+ r.replacement(),
+ IsPhi(
+ kMachInt32,
+ IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
+ IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(value, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
}
@@ -274,23 +287,22 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Unsigned32());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachUint32,
- IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
- IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
- graph()->start()))))));
+ r.replacement(),
+ IsPhi(
+ kMachUint32,
+ IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
+ IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(value, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
}
@@ -298,30 +310,30 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, heap_number, if_false;
EXPECT_THAT(
- phi,
+ r.replacement(),
IsPhi(
- kMachAnyTagged, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())),
+ kMachAnyTagged,
+ IsWord32Shl(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
IsFinish(AllOf(CaptureEq(&heap_number),
IsAllocateHeapNumber(_, CaptureEq(&if_false))),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number),
- IsInt32Constant(HeapNumberValueOffset()),
- IsChangeUint32ToFloat64(val),
+ IsInt32Constant(HeapNumber::kValueOffset -
+ kHeapObjectTag),
+ IsChangeUint32ToFloat64(value),
CaptureEq(&heap_number), CaptureEq(&if_false))),
- IsMerge(
- IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsUint32LessThanOrEqual(
- val, IsInt32Constant(SmiMaxValue())),
- graph()->start()))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+ IsMerge(IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsUint32LessThanOrEqual(
+ value, IsInt32Constant(Smi::kMaxValue)),
+ graph()->start()))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
}
@@ -337,14 +349,11 @@ class ChangeLowering64Test : public ChangeLoweringTest {
TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- EXPECT_THAT(reduction.replacement(),
- IsWord64Shl(IsChangeInt32ToInt64(val),
- IsInt64Constant(SmiShiftAmount())));
+ Node* value = Parameter(Type::Signed32());
+ Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeInt32ToSmi(value));
}
@@ -352,26 +361,23 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(
- kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
- IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
- IsWord64Sar(val, IsInt64Constant(SmiShiftAmount())))),
- IsMerge(
- AllOf(CaptureEq(&if_true),
- IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
- graph()->start())))),
- IsIfFalse(CaptureEq(&branch)))));
+ r.replacement(),
+ IsPhi(kMachFloat64, IsLoadHeapNumber(value, CaptureEq(&if_true)),
+ IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(IsWord64Sar(
+ value, IsInt64Constant(kSmiTagSize + kSmiShiftSize)))),
+ IsMerge(AllOf(CaptureEq(&if_true),
+ IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(
+ value, IsInt64Constant(kSmiTagMask)),
+ graph()->start())))),
+ IsIfFalse(CaptureEq(&branch)))));
}
@@ -379,24 +385,23 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Signed32());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachInt32,
- IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
- IsTruncateInt64ToInt32(
- IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
- graph()->start()))))));
+ r.replacement(),
+ IsPhi(
+ kMachInt32,
+ IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
+ IsTruncateInt64ToInt32(
+ IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(value, IsInt64Constant(kSmiTagMask)),
+ graph()->start()))))));
}
@@ -404,24 +409,23 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Unsigned32());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachUint32,
- IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
- IsTruncateInt64ToInt32(
- IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
- graph()->start()))))));
+ r.replacement(),
+ IsPhi(
+ kMachUint32,
+ IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
+ IsTruncateInt64ToInt32(
+ IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(value, IsInt64Constant(kSmiTagMask)),
+ graph()->start()))))));
}
@@ -429,31 +433,31 @@ TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, heap_number, if_false;
EXPECT_THAT(
- phi,
+ r.replacement(),
IsPhi(
- kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
- IsInt64Constant(SmiShiftAmount())),
+ kMachAnyTagged,
+ IsWord64Shl(IsChangeUint32ToUint64(value),
+ IsInt64Constant(kSmiTagSize + kSmiShiftSize)),
IsFinish(AllOf(CaptureEq(&heap_number),
IsAllocateHeapNumber(_, CaptureEq(&if_false))),
IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
CaptureEq(&heap_number),
- IsInt64Constant(HeapNumberValueOffset()),
- IsChangeUint32ToFloat64(val),
+ IsInt64Constant(HeapNumber::kValueOffset -
+ kHeapObjectTag),
+ IsChangeUint32ToFloat64(value),
CaptureEq(&heap_number), CaptureEq(&if_false))),
- IsMerge(
- IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsUint32LessThanOrEqual(
- val, IsInt32Constant(SmiMaxValue())),
- graph()->start()))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+ IsMerge(IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsUint32LessThanOrEqual(
+ value, IsInt32Constant(Smi::kMaxValue)),
+ graph()->start()))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
}
} // namespace compiler
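
[editor's note] The rewritten ChangeLowering64Test expectations spell out the Smi shift inline: on 64-bit targets V8 uses kSmiTagSize == 1 and kSmiShiftSize == 31, so the Word64Sar/Word64Shl amount is 32, and bit 0 (kSmiTagMask) decides between the Smi fast path and the HeapNumber load. A minimal standalone sketch of that scheme, in plain C++ rather than V8 code, with the constants assumed for a 64-bit target:

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;  // 32 on x64
constexpr int64_t kSmiTagMask = 1;

int64_t TagSmi(int32_t value) {
  // Shift through uint64_t so a negative payload is not left-shifted
  // as a signed value (undefined behavior before C++20). Tag bit 0
  // stays 0, which is what the IsWord64And branch tests for.
  return static_cast<int64_t>(
      static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift);
}

int32_t UntagSmi(int64_t tagged) {
  assert((tagged & kSmiTagMask) == 0);  // tag bit set => HeapNumber path
  // Arithmetic right shift plus truncation, mirroring the
  // IsWord64Sar + IsTruncateInt64ToInt32 matchers above.
  return static_cast<int32_t>(tagged >> kSmiShift);
}

int main() {
  assert(UntagSmi(TagSmi(-42)) == -42);
  assert(UntagSmi(TagSmi(7)) == 7);
}
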
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
index 1f6044b97c..3b60e5b9bd 100644
--- a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -4,9 +4,13 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/machine-type.h"
#include "src/compiler/operator.h"
#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
namespace v8 {
namespace internal {
@@ -15,14 +19,23 @@ namespace compiler {
class CommonOperatorReducerTest : public GraphTest {
public:
explicit CommonOperatorReducerTest(int num_parameters = 1)
- : GraphTest(num_parameters) {}
+ : GraphTest(num_parameters), machine_(zone()) {}
~CommonOperatorReducerTest() OVERRIDE {}
protected:
- Reduction Reduce(Node* node) {
- CommonOperatorReducer reducer;
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kNoFlags) {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone(), kMachPtr, flags);
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, &machine);
+ CommonOperatorReducer reducer(&jsgraph);
return reducer.Reduce(node);
}
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ MachineOperatorBuilder machine_;
};
@@ -78,9 +91,14 @@ TEST_F(CommonOperatorReducerTest, RedundantPhi) {
int const value_input_count = input_count - 1;
TRACED_FOREACH(MachineType, type, kMachineTypes) {
for (int i = 0; i < value_input_count; ++i) {
+ inputs[i] = graph()->start();
+ }
+ Node* merge = graph()->NewNode(common()->Merge(value_input_count),
+ value_input_count, inputs);
+ for (int i = 0; i < value_input_count; ++i) {
inputs[i] = input;
}
- inputs[value_input_count] = graph()->start();
+ inputs[value_input_count] = merge;
Reduction r = Reduce(graph()->NewNode(
common()->Phi(type, value_input_count), input_count, inputs));
ASSERT_TRUE(r.Changed());
@@ -90,6 +108,27 @@ TEST_F(CommonOperatorReducerTest, RedundantPhi) {
}
+TEST_F(CommonOperatorReducerTest, PhiToFloat64MaxOrFloat64Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Reduction r1 =
+ Reduce(graph()->NewNode(common()->Phi(kMachFloat64, 2), p1, p0, merge),
+ MachineOperatorBuilder::kFloat64Max);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsFloat64Max(p1, p0));
+ Reduction r2 =
+ Reduce(graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge),
+ MachineOperatorBuilder::kFloat64Min);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsFloat64Min(p0, p1));
+}
+
+
// -----------------------------------------------------------------------------
// Select
@@ -106,6 +145,23 @@ TEST_F(CommonOperatorReducerTest, RedundantSelect) {
}
}
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64MaxOrFloat64Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Reduction r1 =
+ Reduce(graph()->NewNode(common()->Select(kMachFloat64), check, p1, p0),
+ MachineOperatorBuilder::kFloat64Max);
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsFloat64Max(p1, p0));
+ Reduction r2 =
+ Reduce(graph()->NewNode(common()->Select(kMachFloat64), check, p0, p1),
+ MachineOperatorBuilder::kFloat64Min);
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsFloat64Min(p0, p1));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
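
[editor's note] The new PhiToFloat64MaxOrFloat64Min and SelectToFloat64MaxOrFloat64Min tests pin down a simple identity: a Phi/Select keyed on Float64LessThan(p0, p1) that picks (p1, p0) is exactly max, and one that picks (p0, p1) is exactly min. The reducer only fires when the kFloat64Max/kFloat64Min machine flags say the target implements these directly, since NaN and -0 handling must come from the machine instruction. A standalone sketch of the identity (plain C++, not V8 code):

#include <cassert>

double SelectMax(double p0, double p1) { return p0 < p1 ? p1 : p0; }  // IsFloat64Max(p1, p0)
double SelectMin(double p0, double p1) { return p0 < p1 ? p0 : p1; }  // IsFloat64Min(p0, p1)

int main() {
  assert(SelectMax(1.0, 2.0) == 2.0);
  assert(SelectMin(1.0, 2.0) == 1.0);
}
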
diff --git a/deps/v8/test/unittests/compiler/common-operator-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
index 6e60cfd12a..c0d25ea741 100644
--- a/deps/v8/test/unittests/compiler/common-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/common-operator-unittest.cc
@@ -53,6 +53,8 @@ const SharedOperator kSharedOperators[] = {
SHARED(End, Operator::kKontrol, 0, 0, 1, 0, 0, 0),
SHARED(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
SHARED(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(IfException, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
SHARED(Throw, Operator::kFoldable, 1, 1, 1, 0, 0, 1),
SHARED(Return, Operator::kNoThrow, 1, 1, 1, 0, 0, 1)
#undef SHARED
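
[editor's note] The two new rows register IfSuccess and IfException as control operators with one control input and one control output each; they are the projections that split a throwing operation's control flow into a normal successor and an exceptional one. A loose source-level analogy in plain C++ (nothing here is V8 API):

#include <stdexcept>

int WithProjections(int x) {
  try {
    if (x < 0) throw std::runtime_error("boom");  // IfException edge
    return x + 1;                                 // IfSuccess edge
  } catch (const std::runtime_error&) {
    return -1;                                    // exceptional continuation
  }
}
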
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
index 17716ab1a9..515bd061ef 100644
--- a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -17,7 +17,7 @@ namespace compiler {
do { \
Node* __n[] = {__VA_ARGS__}; \
ASSERT_TRUE(IsEquivalenceClass(arraysize(__n), __n)); \
- } while (false);
+ } while (false)
class ControlEquivalenceTest : public GraphTest {
public:
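
[editor's note] The one-character change above, dropping the semicolon after `while (false)`, is the standard statement-macro fix: with the semicolon baked into the macro, the caller's own `;` becomes an extra empty statement, and the macro no longer parses where a single statement is required. Minimal repro:

#define BAD(x)  do { (void)(x); } while (false);
#define GOOD(x) do { (void)(x); } while (false)

void f(bool b) {
  // With BAD, the expansion leaves an empty statement between the
  // do/while and the 'else', so this would not compile:
  //   if (b) BAD(1); else GOOD(2);
  if (b) GOOD(1); else GOOD(2);  // parses as intended
}

int main() { f(true); }
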
diff --git a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
index c083d4bab5..f300d07767 100644
--- a/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -21,25 +21,32 @@ namespace compiler {
class ControlFlowOptimizerTest : public GraphTest {
public:
explicit ControlFlowOptimizerTest(int num_parameters = 3)
- : GraphTest(num_parameters), machine_(zone()) {}
+ : GraphTest(num_parameters),
+ machine_(zone()),
+ javascript_(zone()),
+ jsgraph_(isolate(), graph(), common(), javascript(), machine()) {}
~ControlFlowOptimizerTest() OVERRIDE {}
protected:
void Optimize() {
- JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(isolate(), graph(), common(), &javascript, machine());
- ControlFlowOptimizer optimizer(&jsgraph, zone());
+ ControlFlowOptimizer optimizer(jsgraph(), zone());
optimizer.Optimize();
}
+ Node* EmptyFrameState() { return jsgraph()->EmptyFrameState(); }
+
+ JSGraph* jsgraph() { return &jsgraph_; }
+ JSOperatorBuilder* javascript() { return &javascript_; }
MachineOperatorBuilder* machine() { return &machine_; }
private:
MachineOperatorBuilder machine_;
+ JSOperatorBuilder javascript_;
+ JSGraph jsgraph_;
};
-TEST_F(ControlFlowOptimizerTest, Switch) {
+TEST_F(ControlFlowOptimizerTest, BuildSwitch1) {
Node* index = Parameter(0);
Node* branch0 = graph()->NewNode(
common()->Branch(),
@@ -65,6 +72,69 @@ TEST_F(ControlFlowOptimizerTest, Switch) {
IsSwitch(index, start()))))));
}
+
+TEST_F(ControlFlowOptimizerTest, BuildSwitch2) {
+ Node* input = Parameter(0);
+ Node* context = Parameter(1);
+ Node* index = FLAG_turbo_deoptimization
+ ? graph()->NewNode(javascript()->ToNumber(), input, context,
+ EmptyFrameState(), start(), start())
+ : graph()->NewNode(javascript()->ToNumber(), input, context,
+ start(), start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), index);
+ Node* branch0 = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(0)),
+ if_success);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* branch1 = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(1)),
+ if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* merge =
+ graph()->NewNode(common()->Merge(3), if_true0, if_true1, if_false1);
+ graph()->SetEnd(graph()->NewNode(common()->End(), merge));
+ Optimize();
+ Capture<Node*> switch_capture;
+ EXPECT_THAT(
+ end(),
+ IsEnd(IsMerge(IsIfValue(0, CaptureEq(&switch_capture)),
+ IsIfValue(1, CaptureEq(&switch_capture)),
+ IsIfDefault(AllOf(CaptureEq(&switch_capture),
+ IsSwitch(index, IsIfSuccess(index)))))));
+}
+
+
+TEST_F(ControlFlowOptimizerTest, CloneBranch) {
+ Node* cond0 = Parameter(0);
+ Node* cond1 = Parameter(1);
+ Node* cond2 = Parameter(2);
+ Node* branch0 = graph()->NewNode(common()->Branch(), cond0, start());
+ Node* control1 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* control2 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* merge0 = graph()->NewNode(common()->Merge(2), control1, control2);
+ Node* phi0 =
+ graph()->NewNode(common()->Phi(kRepBit, 2), cond1, cond2, merge0);
+ Node* branch = graph()->NewNode(common()->Branch(), phi0, merge0);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ graph()->SetEnd(graph()->NewNode(common()->End(), merge));
+ Optimize();
+ Capture<Node*> branch1_capture, branch2_capture;
+ EXPECT_THAT(
+ end(),
+ IsEnd(IsMerge(IsMerge(IsIfTrue(CaptureEq(&branch1_capture)),
+ IsIfTrue(CaptureEq(&branch2_capture))),
+ IsMerge(IsIfFalse(AllOf(CaptureEq(&branch1_capture),
+ IsBranch(cond1, control1))),
+ IsIfFalse(AllOf(CaptureEq(&branch2_capture),
+ IsBranch(cond2, control2)))))));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
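
[editor's note] BuildSwitch1/2 assert the core control-flow-optimizer rewrite: a chain of Word32Equal branches against distinct constants collapses into a single Switch with IfValue cases and an IfDefault. Expressed as two behaviorally equivalent plain C++ functions:

#include <cassert>

int Chain(int index) {
  if (index == 0) return 100;
  if (index == 1) return 101;
  return 102;
}

int Switched(int index) {
  switch (index) {
    case 0: return 100;   // IsIfValue(0, ...)
    case 1: return 101;   // IsIfValue(1, ...)
    default: return 102;  // IsIfDefault(...)
  }
}

int main() {
  for (int i = -1; i < 3; ++i) assert(Chain(i) == Switched(i));
}
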
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index afa1e94245..9138ab2ca6 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -11,9 +11,9 @@ namespace compiler {
namespace {
// Immediates (random subset).
-static const int32_t kImmediates[] = {
- kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
- 6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+const int32_t kImmediates[] = {kMinInt, -42, -1, 0, 1, 2,
+ 3, 4, 5, 6, 7, 8,
+ 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
} // namespace
@@ -666,6 +666,44 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
}
}
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
+
+
+TEST_F(InstructionSelectorTest, Uint32LessThanWithLoadAndLoadStackPointer) {
+ StreamBuilder m(this, kMachBool);
+ Node* const sl = m.Load(
+ kMachPtr,
+ m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
+ Node* const sp = m.LoadStackPointer();
+ Node* const n = m.Uint32LessThan(sl, sp);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32StackCheck, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kUnsignedGreaterThan, s[0]->flags_condition());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Lzcnt, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
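
[editor's note] The new Word32Clz test pins the operator to kIA32Lzcnt. The semantics worth noting is the zero case: lzcnt defines clz(0) == 32, unlike a plain bsr-based sequence. A portable sketch of the expected behavior:

#include <cassert>
#include <cstdint>

uint32_t Word32Clz(uint32_t value) {
  // Count leading zero bits; the n < 32 guard keeps the shift defined
  // and makes Word32Clz(0) return 32, matching lzcnt.
  uint32_t n = 0;
  while (n < 32 && (value & (0x80000000u >> n)) == 0) ++n;
  return n;
}

int main() {
  assert(Word32Clz(0) == 32);
  assert(Word32Clz(1) == 31);
  assert(Word32Clz(0x80000000u) == 0);
}
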
diff --git a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
index d3e00c642c..e52580dc64 100644
--- a/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-selector-unittest.cc
@@ -4,7 +4,8 @@
#include "test/unittests/compiler/instruction-selector-unittest.h"
-#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/schedule.h"
#include "src/flags.h"
#include "test/unittests/compiler/compiler-test-utils.h"
@@ -346,9 +347,13 @@ TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
Node* receiver = m.Parameter(1);
Node* context = m.Parameter(2);
- Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(1));
- Node* locals = m.NewNode(m.common()->StateValues(0));
- Node* stack = m.NewNode(m.common()->StateValues(0));
+ ZoneVector<MachineType> int32_type(1, kMachInt32, zone());
+ ZoneVector<MachineType> empty_types(zone());
+
+ Node* parameters =
+ m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(1));
+ Node* locals = m.NewNode(m.common()->TypedStateValues(&empty_types));
+ Node* stack = m.NewNode(m.common()->TypedStateValues(&empty_types));
Node* context_dummy = m.Int32Constant(0);
Node* state_node = m.NewNode(
@@ -386,10 +391,17 @@ TARGET_TEST_F(InstructionSelectorTest, CallFunctionStubWithDeopt) {
Node* receiver = m.Parameter(1);
Node* context = m.Int32Constant(1); // Context is ignored.
+ ZoneVector<MachineType> int32_type(1, kMachInt32, zone());
+ ZoneVector<MachineType> float64_type(1, kMachFloat64, zone());
+ ZoneVector<MachineType> tagged_type(1, kMachAnyTagged, zone());
+
// Build frame state for the state before the call.
- Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
- Node* locals = m.NewNode(m.common()->StateValues(1), m.Float64Constant(0.5));
- Node* stack = m.NewNode(m.common()->StateValues(1), m.UndefinedConstant());
+ Node* parameters =
+ m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
+ Node* locals = m.NewNode(m.common()->TypedStateValues(&float64_type),
+ m.Float64Constant(0.5));
+ Node* stack = m.NewNode(m.common()->TypedStateValues(&tagged_type),
+ m.UndefinedConstant());
Node* context_sentinel = m.Int32Constant(0);
Node* frame_state_before = m.NewNode(
@@ -472,10 +484,17 @@ TARGET_TEST_F(InstructionSelectorTest,
Node* receiver = m.Parameter(1);
Node* context = m.Int32Constant(66);
+ ZoneVector<MachineType> int32_type(1, kMachInt32, zone());
+ ZoneVector<MachineType> int32x2_type(2, kMachInt32, zone());
+ ZoneVector<MachineType> float64_type(1, kMachFloat64, zone());
+
// Build frame state for the state before the call.
- Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(63));
- Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(64));
- Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(65));
+ Node* parameters =
+ m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(63));
+ Node* locals =
+ m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(64));
+ Node* stack =
+ m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(65));
Node* frame_state_parent =
m.NewNode(m.common()->FrameState(JS_FRAME, bailout_id_parent,
OutputFrameStateCombine::Ignore()),
@@ -483,11 +502,11 @@ TARGET_TEST_F(InstructionSelectorTest,
Node* context2 = m.Int32Constant(46);
Node* parameters2 =
- m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
- Node* locals2 =
- m.NewNode(m.common()->StateValues(1), m.Float64Constant(0.25));
- Node* stack2 = m.NewNode(m.common()->StateValues(2), m.Int32Constant(44),
- m.Int32Constant(45));
+ m.NewNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
+ Node* locals2 = m.NewNode(m.common()->TypedStateValues(&float64_type),
+ m.Float64Constant(0.25));
+ Node* stack2 = m.NewNode(m.common()->TypedStateValues(&int32x2_type),
+ m.Int32Constant(44), m.Int32Constant(45));
Node* frame_state_before =
m.NewNode(m.common()->FrameState(JS_FRAME, bailout_id_before,
OutputFrameStateCombine::Push()),
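
[editor's note] The recurring change in this file is StateValues(n) becoming TypedStateValues(&types): each frame-state slot now carries an explicit MachineType instead of leaving the deoptimizer to infer how to materialize the value. A reduced model of the shape of that data (the types here are stand-ins, not the real V8 classes):

#include <cassert>
#include <vector>

enum class MachineType { kInt32, kFloat64, kAnyTagged };

struct TypedSlot {
  MachineType type;  // how the deoptimizer should materialize the slot
  double payload;    // placeholder for the actual value
};

int main() {
  // Before: StateValues(1) carried only the value. After: the slot
  // pairs the value with its MachineType, e.g. a kMachFloat64 local.
  std::vector<TypedSlot> locals{{MachineType::kFloat64, 0.5}};
  assert(locals[0].type == MachineType::kFloat64);
}
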
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
index 001fb11d13..c2e626fd59 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -40,7 +40,6 @@ InstructionSequenceTest::InstructionSequenceTest()
num_general_registers_(kDefaultNRegs),
num_double_registers_(kDefaultNRegs),
instruction_blocks_(zone()),
- current_instruction_index_(-1),
current_block_(nullptr),
block_returns_(false) {
InitializeRegisterNames();
@@ -100,8 +99,8 @@ void InstructionSequenceTest::StartBlock() {
}
-int InstructionSequenceTest::EndBlock(BlockCompletion completion) {
- int instruction_index = kMinInt;
+Instruction* InstructionSequenceTest::EndBlock(BlockCompletion completion) {
+ Instruction* result = nullptr;
if (block_returns_) {
CHECK(completion.type_ == kBlockEnd || completion.type_ == kFallThrough);
completion.type_ = kBlockEnd;
@@ -110,22 +109,22 @@ int InstructionSequenceTest::EndBlock(BlockCompletion completion) {
case kBlockEnd:
break;
case kFallThrough:
- instruction_index = EmitFallThrough();
+ result = EmitFallThrough();
break;
case kJump:
CHECK(!block_returns_);
- instruction_index = EmitJump();
+ result = EmitJump();
break;
case kBranch:
CHECK(!block_returns_);
- instruction_index = EmitBranch(completion.op_);
+ result = EmitBranch(completion.op_);
break;
}
completions_.push_back(completion);
CHECK(current_block_ != nullptr);
sequence()->EndBlock(current_block_->rpo_number());
current_block_ = nullptr;
- return instruction_index;
+ return result;
}
@@ -139,15 +138,15 @@ InstructionSequenceTest::VReg InstructionSequenceTest::Define(
TestOperand output_op) {
VReg vreg = NewReg();
InstructionOperand outputs[1]{ConvertOutputOp(vreg, output_op)};
- Emit(vreg.value_, kArchNop, 1, outputs);
+ Emit(kArchNop, 1, outputs);
return vreg;
}
-int InstructionSequenceTest::Return(TestOperand input_op_0) {
+Instruction* InstructionSequenceTest::Return(TestOperand input_op_0) {
block_returns_ = true;
InstructionOperand inputs[1]{ConvertInputOp(input_op_0)};
- return Emit(NewIndex(), kArchRet, 0, nullptr, 1, inputs);
+ return Emit(kArchRet, 0, nullptr, 1, inputs);
}
@@ -192,12 +191,12 @@ InstructionSequenceTest::VReg InstructionSequenceTest::DefineConstant(
VReg vreg = NewReg();
sequence()->AddConstant(vreg.value_, Constant(imm));
InstructionOperand outputs[1]{ConstantOperand(vreg.value_)};
- Emit(vreg.value_, kArchNop, 1, outputs);
+ Emit(kArchNop, 1, outputs);
return vreg;
}
-int InstructionSequenceTest::EmitNop() { return Emit(NewIndex(), kArchNop); }
+Instruction* InstructionSequenceTest::EmitNop() { return Emit(kArchNop); }
static size_t CountInputs(size_t size,
@@ -210,16 +209,17 @@ static size_t CountInputs(size_t size,
}
-int InstructionSequenceTest::EmitI(size_t input_size, TestOperand* inputs) {
+Instruction* InstructionSequenceTest::EmitI(size_t input_size,
+ TestOperand* inputs) {
InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
- return Emit(NewIndex(), kArchNop, 0, nullptr, input_size, mapped_inputs);
+ return Emit(kArchNop, 0, nullptr, input_size, mapped_inputs);
}
-int InstructionSequenceTest::EmitI(TestOperand input_op_0,
- TestOperand input_op_1,
- TestOperand input_op_2,
- TestOperand input_op_3) {
+Instruction* InstructionSequenceTest::EmitI(TestOperand input_op_0,
+ TestOperand input_op_1,
+ TestOperand input_op_2,
+ TestOperand input_op_3) {
TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
return EmitI(CountInputs(arraysize(inputs), inputs), inputs);
}
@@ -230,7 +230,7 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
VReg output_vreg = NewReg();
InstructionOperand outputs[1]{ConvertOutputOp(output_vreg, output_op)};
InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
- Emit(output_vreg.value_, kArchNop, 1, outputs, input_size, mapped_inputs);
+ Emit(kArchNop, 1, outputs, input_size, mapped_inputs);
return output_vreg;
}
@@ -243,14 +243,36 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
}
+InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
+ TestOperand output_op_0, TestOperand output_op_1, size_t input_size,
+ TestOperand* inputs) {
+ VRegPair output_vregs = std::make_pair(NewReg(), NewReg());
+ InstructionOperand outputs[2]{
+ ConvertOutputOp(output_vregs.first, output_op_0),
+ ConvertOutputOp(output_vregs.second, output_op_1)};
+ InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
+ Emit(kArchNop, 2, outputs, input_size, mapped_inputs);
+ return output_vregs;
+}
+
+
+InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
+ TestOperand output_op_0, TestOperand output_op_1, TestOperand input_op_0,
+ TestOperand input_op_1, TestOperand input_op_2, TestOperand input_op_3) {
+ TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
+ return EmitOOI(output_op_0, output_op_1,
+ CountInputs(arraysize(inputs), inputs), inputs);
+}
+
+
InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
VReg output_vreg = NewReg();
InstructionOperand outputs[1]{ConvertOutputOp(output_vreg, output_op)};
CHECK(UnallocatedOperand::cast(outputs[0]).HasFixedPolicy());
InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
- Emit(output_vreg.value_, kArchCallCodeObject, 1, outputs, input_size,
- mapped_inputs, 0, nullptr, true);
+ Emit(kArchCallCodeObject, 1, outputs, input_size, mapped_inputs, 0, nullptr,
+ true);
return output_vreg;
}
@@ -263,36 +285,26 @@ InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
}
-const Instruction* InstructionSequenceTest::GetInstruction(
- int instruction_index) {
- auto it = instructions_.find(instruction_index);
- CHECK(it != instructions_.end());
- return it->second;
-}
-
-
-int InstructionSequenceTest::EmitBranch(TestOperand input_op) {
+Instruction* InstructionSequenceTest::EmitBranch(TestOperand input_op) {
InstructionOperand inputs[4]{ConvertInputOp(input_op), ConvertInputOp(Imm()),
ConvertInputOp(Imm()), ConvertInputOp(Imm())};
InstructionCode opcode = kArchJmp | FlagsModeField::encode(kFlags_branch) |
FlagsConditionField::encode(kEqual);
- auto instruction =
- NewInstruction(opcode, 0, nullptr, 4, inputs)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
+ auto instruction = NewInstruction(opcode, 0, nullptr, 4, inputs);
+ return AddInstruction(instruction);
}
-int InstructionSequenceTest::EmitFallThrough() {
- auto instruction = NewInstruction(kArchNop, 0, nullptr)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
+Instruction* InstructionSequenceTest::EmitFallThrough() {
+ auto instruction = NewInstruction(kArchNop, 0, nullptr);
+ return AddInstruction(instruction);
}
-int InstructionSequenceTest::EmitJump() {
+Instruction* InstructionSequenceTest::EmitJump() {
InstructionOperand inputs[1]{ConvertInputOp(Imm())};
- auto instruction =
- NewInstruction(kArchJmp, 0, nullptr, 1, inputs)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
+ auto instruction = NewInstruction(kArchJmp, 0, nullptr, 1, inputs);
+ return AddInstruction(instruction);
}
@@ -359,6 +371,9 @@ InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
case kRegister:
return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START);
+ case kSlot:
+ return Unallocated(op, UnallocatedOperand::MUST_HAVE_SLOT,
+ UnallocatedOperand::USED_AT_START);
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
@@ -396,15 +411,14 @@ InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
InstructionBlock* InstructionSequenceTest::NewBlock() {
CHECK(current_block_ == nullptr);
- auto block_id = BasicBlock::Id::FromSize(instruction_blocks_.size());
- Rpo rpo = Rpo::FromInt(block_id.ToInt());
+ Rpo rpo = Rpo::FromInt(static_cast<int>(instruction_blocks_.size()));
Rpo loop_header = Rpo::Invalid();
Rpo loop_end = Rpo::Invalid();
if (!loop_blocks_.empty()) {
auto& loop_data = loop_blocks_.back();
// This is a loop header.
if (!loop_data.loop_header_.IsValid()) {
- loop_end = Rpo::FromInt(block_id.ToInt() + loop_data.expected_blocks_);
+ loop_end = Rpo::FromInt(rpo.ToInt() + loop_data.expected_blocks_);
loop_data.expected_blocks_--;
loop_data.loop_header_ = rpo;
} else {
@@ -416,8 +430,8 @@ InstructionBlock* InstructionSequenceTest::NewBlock() {
}
}
// Construct instruction block.
- auto instruction_block = new (zone())
- InstructionBlock(zone(), block_id, rpo, loop_header, loop_end, false);
+ auto instruction_block =
+ new (zone()) InstructionBlock(zone(), rpo, loop_header, loop_end, false);
instruction_blocks_.push_back(instruction_block);
current_block_ = instruction_block;
sequence()->StartBlock(rpo);
@@ -458,23 +472,20 @@ void InstructionSequenceTest::WireBlock(size_t block_offset, int jump_offset) {
}
-int InstructionSequenceTest::Emit(int instruction_index, InstructionCode code,
- size_t outputs_size,
- InstructionOperand* outputs,
- size_t inputs_size,
- InstructionOperand* inputs, size_t temps_size,
- InstructionOperand* temps, bool is_call) {
+Instruction* InstructionSequenceTest::Emit(
+ InstructionCode code, size_t outputs_size, InstructionOperand* outputs,
+ size_t inputs_size, InstructionOperand* inputs, size_t temps_size,
+ InstructionOperand* temps, bool is_call) {
auto instruction = NewInstruction(code, outputs_size, outputs, inputs_size,
inputs, temps_size, temps);
if (is_call) instruction->MarkAsCall();
- return AddInstruction(instruction_index, instruction);
+ return AddInstruction(instruction);
}
-int InstructionSequenceTest::AddInstruction(int instruction_index,
- Instruction* instruction) {
+Instruction* InstructionSequenceTest::AddInstruction(Instruction* instruction) {
sequence()->AddInstruction(instruction);
- return instruction_index;
+ return instruction;
}
} // namespace compiler
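
[editor's note] The refactoring running through this file has one theme: the emit helpers used to hand back a synthetic int index (minted by the decrementing NewIndex() counter) that callers resolved via GetInstruction() and a side map; returning the Instruction* directly removes both. The pattern, reduced to a self-contained sketch:

#include <cassert>
#include <memory>
#include <vector>

struct Instruction { int opcode; };

class Sequence {
 public:
  Instruction* Add(int opcode) {
    storage_.push_back(std::make_unique<Instruction>(Instruction{opcode}));
    return storage_.back().get();  // hand back the object, not an index
  }

 private:
  std::vector<std::unique_ptr<Instruction>> storage_;
};

int main() {
  Sequence seq;
  assert(seq.Add(42)->opcode == 42);  // no GetInstruction(index) lookup
}
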
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
index 613e25883e..2d75da7e47 100644
--- a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
@@ -18,7 +18,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
static const int kDefaultNRegs = 4;
static const int kNoValue = kMinInt;
- typedef BasicBlock::RpoNumber Rpo;
+ typedef RpoNumber Rpo;
struct VReg {
VReg() : value_(kNoValue) {}
@@ -27,6 +27,8 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
int value_;
};
+ typedef std::pair<VReg, VReg> VRegPair;
+
enum TestOperandType {
kInvalid,
kSameAsFirst,
@@ -125,14 +127,14 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
void StartLoop(int loop_blocks);
void EndLoop();
void StartBlock();
- int EndBlock(BlockCompletion completion = FallThrough());
+ Instruction* EndBlock(BlockCompletion completion = FallThrough());
TestOperand Imm(int32_t imm = 0);
VReg Define(TestOperand output_op);
VReg Parameter(TestOperand output_op = Reg()) { return Define(output_op); }
- int Return(TestOperand input_op_0);
- int Return(VReg vreg) { return Return(Reg(vreg, 0)); }
+ Instruction* Return(TestOperand input_op_0);
+ Instruction* Return(VReg vreg) { return Return(Reg(vreg, 0)); }
PhiInstruction* Phi(VReg incoming_vreg_0 = VReg(),
VReg incoming_vreg_1 = VReg(),
@@ -142,27 +144,30 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
void SetInput(PhiInstruction* phi, size_t input, VReg vreg);
VReg DefineConstant(int32_t imm = 0);
- int EmitNop();
- int EmitI(size_t input_size, TestOperand* inputs);
- int EmitI(TestOperand input_op_0 = TestOperand(),
- TestOperand input_op_1 = TestOperand(),
- TestOperand input_op_2 = TestOperand(),
- TestOperand input_op_3 = TestOperand());
+ Instruction* EmitNop();
+ Instruction* EmitI(size_t input_size, TestOperand* inputs);
+ Instruction* EmitI(TestOperand input_op_0 = TestOperand(),
+ TestOperand input_op_1 = TestOperand(),
+ TestOperand input_op_2 = TestOperand(),
+ TestOperand input_op_3 = TestOperand());
VReg EmitOI(TestOperand output_op, size_t input_size, TestOperand* inputs);
VReg EmitOI(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
TestOperand input_op_1 = TestOperand(),
TestOperand input_op_2 = TestOperand(),
TestOperand input_op_3 = TestOperand());
+ VRegPair EmitOOI(TestOperand output_op_0, TestOperand output_op_1,
+ size_t input_size, TestOperand* inputs);
+ VRegPair EmitOOI(TestOperand output_op_0, TestOperand output_op_1,
+ TestOperand input_op_0 = TestOperand(),
+ TestOperand input_op_1 = TestOperand(),
+ TestOperand input_op_2 = TestOperand(),
+ TestOperand input_op_3 = TestOperand());
VReg EmitCall(TestOperand output_op, size_t input_size, TestOperand* inputs);
VReg EmitCall(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
TestOperand input_op_1 = TestOperand(),
TestOperand input_op_2 = TestOperand(),
TestOperand input_op_3 = TestOperand());
- // Get defining instruction vreg or value returned at instruction creation
- // time when there is no return value.
- const Instruction* GetInstruction(int instruction_index);
-
InstructionBlock* current_block() const { return current_block_; }
int num_general_registers() const { return num_general_registers_; }
int num_double_registers() const { return num_double_registers_; }
@@ -172,13 +177,12 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
private:
VReg NewReg() { return VReg(sequence()->NextVirtualRegister()); }
- int NewIndex() { return current_instruction_index_--; }
static TestOperand Invalid() { return TestOperand(kInvalid, VReg()); }
- int EmitBranch(TestOperand input_op);
- int EmitFallThrough();
- int EmitJump();
+ Instruction* EmitBranch(TestOperand input_op);
+ Instruction* EmitFallThrough();
+ Instruction* EmitJump();
Instruction* NewInstruction(InstructionCode code, size_t outputs_size,
InstructionOperand* outputs,
size_t inputs_size = 0,
@@ -202,12 +206,13 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
InstructionBlock* NewBlock();
void WireBlock(size_t block_offset, int jump_offset);
- int Emit(int instruction_index, InstructionCode code, size_t outputs_size = 0,
- InstructionOperand* outputs = nullptr, size_t inputs_size = 0,
- InstructionOperand* inputs = nullptr, size_t temps_size = 0,
- InstructionOperand* temps = nullptr, bool is_call = false);
+ Instruction* Emit(InstructionCode code, size_t outputs_size = 0,
+ InstructionOperand* outputs = nullptr,
+ size_t inputs_size = 0,
+ InstructionOperand* inputs = nullptr, size_t temps_size = 0,
+ InstructionOperand* temps = nullptr, bool is_call = false);
- int AddInstruction(int instruction_index, Instruction* instruction);
+ Instruction* AddInstruction(Instruction* instruction);
struct LoopData {
Rpo loop_header_;
@@ -226,7 +231,6 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
// Block building state.
InstructionBlocks instruction_blocks_;
Instructions instructions_;
- int current_instruction_index_;
Completions completions_;
LoopBlocks loop_blocks_;
InstructionBlock* current_block_;
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index b5c688e147..5c508a5d4b 100644
--- a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -62,58 +62,6 @@ Type* const kNumberTypes[] = {
// -----------------------------------------------------------------------------
-// Math.abs
-
-
-TEST_F(JSBuiltinReducerTest, MathAbs) {
- Handle<JSFunction> f = MathFunction("abs");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call);
-
- if (t0->Is(Type::Unsigned32())) {
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
- } else {
- Capture<Node*> branch;
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsSelect(kMachNone,
- IsNumberLessThan(IsNumberConstant(BitEq(0.0)), p0), p0,
- IsNumberSubtract(IsNumberConstant(BitEq(0.0)), p0)));
- }
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Math.sqrt
-
-
-TEST_F(JSBuiltinReducerTest, MathSqrt) {
- Handle<JSFunction> f = MathFunction("sqrt");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
- }
-}
-
-
-// -----------------------------------------------------------------------------
// Math.max
@@ -223,79 +171,6 @@ TEST_F(JSBuiltinReducerTest, MathFround) {
}
}
-
-// -----------------------------------------------------------------------------
-// Math.floor
-
-
-TEST_F(JSBuiltinReducerTest, MathFloorAvailable) {
- Handle<JSFunction> f = MathFunction("floor");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Floor);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Floor(p0));
- }
-}
-
-
-TEST_F(JSBuiltinReducerTest, MathFloorUnavailable) {
- Handle<JSFunction> f = MathFunction("floor");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
-
- ASSERT_FALSE(r.Changed());
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Math.ceil
-
-
-TEST_F(JSBuiltinReducerTest, MathCeilAvailable) {
- Handle<JSFunction> f = MathFunction("ceil");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Ceil);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Ceil(p0));
- }
-}
-
-
-TEST_F(JSBuiltinReducerTest, MathCeilUnavailable) {
- Handle<JSFunction> f = MathFunction("ceil");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
-
- ASSERT_FALSE(r.Changed());
- }
-}
} // namespace compiler
} // namespace internal
} // namespace v8
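
[editor's note] The deleted Math.abs/sqrt/floor/ceil reducer tests are not entirely lost coverage: sqrt and floor reappear as the %_MathSqrt and %_MathFloor intrinsic tests added to js-intrinsic-lowering-unittest.cc further down in this patch. For reference, the identity the removed MathAbs expectation encoded for general number inputs was Select(0 < x, x, 0 - x); using 0 - x rather than -x gets -0 right, since 0 - 0.0 is +0.0 while -x would flip +0.0 to -0.0. As a plain C++ sketch:

#include <cassert>

double AbsViaSelect(double x) { return 0.0 < x ? x : 0.0 - x; }

int main() {
  assert(AbsViaSelect(-3.5) == 3.5);
  assert(AbsViaSelect(0.0) == 0.0);  // stays +0.0, not -0.0
}
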
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 20d5c069fe..8adbc54ac2 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -27,8 +27,9 @@ class JSIntrinsicLoweringTest : public GraphTest {
~JSIntrinsicLoweringTest() OVERRIDE {}
protected:
- Reduction Reduce(Node* node) {
- MachineOperatorBuilder machine(zone());
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kNoFlags) {
+ MachineOperatorBuilder machine(zone(), kMachPtr, flags);
JSGraph jsgraph(isolate(), graph(), common(), javascript(), &machine);
JSIntrinsicLowering reducer(&jsgraph);
return reducer.Reduce(node);
@@ -42,6 +43,61 @@ class JSIntrinsicLoweringTest : public GraphTest {
// -----------------------------------------------------------------------------
+// %_ConstructDouble
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineOptimizedConstructDouble) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineConstructDouble, 2), input0,
+ input1, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64InsertHighWord32(
+ IsFloat64InsertLowWord32(
+ IsNumberConstant(BitEq(0.0)), input1),
+ input0));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_DoubleLo
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleLo) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleLo, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64ExtractLowWord32(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_DoubleHi
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleHi) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleHi, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64ExtractHighWord32(input));
+}
+
+
+// -----------------------------------------------------------------------------
// %_IsSmi
@@ -169,6 +225,94 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
// -----------------------------------------------------------------------------
+// %_JSValueGetValue
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineJSValueGetValue) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineJSValueGetValue, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsLoadField(AccessBuilder::ForValue(), input, effect, control));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_MathFloor
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineMathFloor) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathFloor, 1),
+ input, context, effect, control),
+ MachineOperatorBuilder::kFloat64RoundDown);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64RoundDown(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_MathSqrt
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineMathSqrt) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathSqrt, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Sqrt(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_StringGetLength
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineStringGetLength) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineStringGetLength, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
+ input, effect, control));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_MathClz32
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineMathClz32) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathClz32, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsWord32Clz(input));
+}
+
+
+// -----------------------------------------------------------------------------
// %_ValueOf
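
[editor's note] The three double intrinsics tested above have simple bit-level meanings: %_DoubleHi and %_DoubleLo extract the high and low 32 bits of an IEEE-754 double, and %_ConstructDouble reassembles one from those words, which is what the Float64Insert{High,Low}Word32 and Float64Extract{High,Low}Word32 matchers encode. A standalone model:

#include <cassert>
#include <cstdint>
#include <cstring>

uint32_t DoubleHi(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);  // bit-cast without aliasing UB
  return static_cast<uint32_t>(bits >> 32);
}

uint32_t DoubleLo(double v) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);
  return static_cast<uint32_t>(bits);
}

double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double v;
  std::memcpy(&v, &bits, sizeof v);
  return v;
}

int main() {
  double d = 1.5;
  assert(ConstructDouble(DoubleHi(d), DoubleLo(d)) == d);  // round-trips
}
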
diff --git a/deps/v8/test/unittests/compiler/js-operator-unittest.cc b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
index 8f4622ae67..7ecaed016d 100644
--- a/deps/v8/test/unittests/compiler/js-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-operator-unittest.cc
@@ -28,6 +28,7 @@ struct SharedOperator {
int control_input_count;
int value_output_count;
int effect_output_count;
+ int control_output_count;
};
@@ -39,48 +40,48 @@ std::ostream& operator<<(std::ostream& os, const SharedOperator& sop) {
const SharedOperator kSharedOperators[] = {
#define SHARED(Name, properties, value_input_count, frame_state_input_count, \
effect_input_count, control_input_count, value_output_count, \
- effect_output_count) \
+ effect_output_count, control_output_count) \
{ \
&JSOperatorBuilder::Name, IrOpcode::kJS##Name, properties, \
value_input_count, frame_state_input_count, effect_input_count, \
- control_input_count, value_output_count, effect_output_count \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count \
}
- SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(StrictEqual, Operator::kPure, 2, 0, 0, 0, 1, 0),
- SHARED(StrictNotEqual, Operator::kPure, 2, 0, 0, 0, 1, 0),
- SHARED(LessThan, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(GreaterThan, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(BitwiseOr, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(BitwiseXor, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(BitwiseAnd, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(ShiftLeft, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(ShiftRight, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(ShiftRightLogical, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Add, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Subtract, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Multiply, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Divide, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Modulus, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(UnaryNot, Operator::kPure, 1, 0, 0, 0, 1, 0),
- SHARED(ToBoolean, Operator::kPure, 1, 0, 0, 0, 1, 0),
- SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
- SHARED(ToString, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
- SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
- SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(Create, Operator::kEliminatable, 0, 0, 1, 1, 1, 1),
- SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0),
- SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Debugger, Operator::kNoProperties, 0, 0, 1, 1, 0, 1),
- SHARED(CreateFunctionContext, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(CreateWithContext, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(CreateBlockContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
- SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
- SHARED(CreateScriptContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1)
+ SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(StrictEqual, Operator::kPure, 2, 0, 0, 0, 1, 0, 0),
+ SHARED(StrictNotEqual, Operator::kPure, 2, 0, 0, 0, 1, 0, 0),
+ SHARED(LessThan, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(GreaterThan, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(BitwiseOr, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(BitwiseXor, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(BitwiseAnd, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(ShiftLeft, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(ShiftRight, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(ShiftRightLogical, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(Add, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(Subtract, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(Multiply, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(Divide, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(Modulus, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(UnaryNot, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
+ SHARED(ToBoolean, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
+ SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
+ SHARED(ToString, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
+ SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
+ SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
+ SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
+ SHARED(Create, Operator::kEliminatable, 0, 0, 1, 0, 1, 1, 0),
+ SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
+ SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(CreateFunctionContext, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
+ SHARED(CreateWithContext, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(CreateBlockContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
+ SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
+ SHARED(CreateScriptContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2)
#undef SHARED
};
@@ -122,7 +123,7 @@ TEST_P(JSSharedOperatorTest, NumberOfInputsAndOutputs) {
EXPECT_EQ(sop.value_output_count, op->ValueOutputCount());
EXPECT_EQ(sop.effect_output_count, op->EffectOutputCount());
- EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(sop.control_output_count, op->ControlOutputCount());
}
@@ -169,7 +170,7 @@ TEST_P(JSStorePropertyOperatorTest, NumberOfInputsAndOutputs) {
const Operator* op = javascript.StoreProperty(mode);
// TODO(jarin): Get rid of this hack.
- const int frame_state_input_count = FLAG_turbo_deoptimization ? 1 : 0;
+ const int frame_state_input_count = FLAG_turbo_deoptimization ? 2 : 0;
EXPECT_EQ(3, op->ValueInputCount());
EXPECT_EQ(1, OperatorProperties::GetContextInputCount(op));
EXPECT_EQ(frame_state_input_count,
@@ -181,7 +182,7 @@ TEST_P(JSStorePropertyOperatorTest, NumberOfInputsAndOutputs) {
EXPECT_EQ(0, op->ValueOutputCount());
EXPECT_EQ(1, op->EffectOutputCount());
- EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(2, op->ControlOutputCount());
}
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index d61a1817b2..d347c4139b 100644
--- a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -8,6 +8,7 @@
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -119,6 +120,16 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithBoolean) {
}
+TEST_F(JSTypedLoweringTest, JSUnaryNotWithOrderedNumber) {
+ Node* input = Parameter(Type::OrderedNumber(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberEqual(input, IsNumberConstant(0)));
+}
+
+
TEST_F(JSTypedLoweringTest, JSUnaryNotWithFalsish) {
Node* input = Parameter(
Type::Union(
@@ -173,13 +184,25 @@ TEST_F(JSTypedLoweringTest, JSUnaryNotWithNonZeroPlainNumber) {
}
+TEST_F(JSTypedLoweringTest, JSUnaryNotWithString) {
+ Node* input = Parameter(Type::String(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberEqual(IsLoadField(AccessBuilder::ForStringLength(), input,
+ graph()->start(), graph()->start()),
+ IsNumberConstant(0.0)));
+}
+
+
TEST_F(JSTypedLoweringTest, JSUnaryNotWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsBooleanNot(IsAnyToBoolean(input)));
+ ASSERT_FALSE(r.Changed());
}
@@ -349,13 +372,37 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
}
+TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
+ Node* input = Parameter(Type::OrderedNumber(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0.0))));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
+ Node* input = Parameter(Type::String(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsNumberLessThan(IsNumberConstant(0.0),
+ IsLoadField(AccessBuilder::ForStringLength(), input,
+ graph()->start(), graph()->start())));
+}
+
+
TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsAnyToBoolean(input));
+ ASSERT_FALSE(r.Changed());
}
@@ -681,8 +728,9 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
Node* control = graph()->start();
Node* node = graph()->NewNode(javascript()->StoreProperty(language_mode),
base, key, value, context);
- if (FLAG_turbo_deoptimization) {
- node->AppendInput(zone(), UndefinedConstant());
+ for (int i = 0;
+ i < OperatorProperties::GetFrameStateInputCount(node->op()); i++) {
+ node->AppendInput(zone(), EmptyFrameState());
}
node->AppendInput(zone(), effect);
node->AppendInput(zone(), control);
@@ -726,8 +774,9 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
Node* control = graph()->start();
Node* node = graph()->NewNode(javascript()->StoreProperty(language_mode),
base, key, value, context);
- if (FLAG_turbo_deoptimization) {
- node->AppendInput(zone(), UndefinedConstant());
+ for (int i = 0;
+ i < OperatorProperties::GetFrameStateInputCount(node->op()); i++) {
+ node->AppendInput(zone(), EmptyFrameState());
}
node->AppendInput(zone(), effect);
node->AppendInput(zone(), control);
@@ -784,8 +833,9 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
Node* control = graph()->start();
Node* node = graph()->NewNode(javascript()->StoreProperty(language_mode),
base, key, value, context);
- if (FLAG_turbo_deoptimization) {
- node->AppendInput(zone(), UndefinedConstant());
+ for (int i = 0;
+ i < OperatorProperties::GetFrameStateInputCount(node->op()); i++) {
+ node->AppendInput(zone(), EmptyFrameState());
}
node->AppendInput(zone(), effect);
node->AppendInput(zone(), control);
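
[editor's note] The new String cases above pin down the typed lowering for string truthiness: ToBoolean(string) becomes `0 < length` and UnaryNot(string) becomes `length == 0`, loaded via AccessBuilder::ForStringLength, so neither needs the generic AnyToBoolean path (which the changed WithAny tests show is no longer emitted at all). The same logic in plain C++:

#include <cassert>
#include <string>

bool ToBooleanString(const std::string& s) { return 0 < s.size(); }   // IsNumberLessThan(0, length)
bool UnaryNotString(const std::string& s) { return s.size() == 0; }   // IsNumberEqual(length, 0)

int main() {
  assert(ToBooleanString("x") && !ToBooleanString(""));
  assert(UnaryNotString("") && !UnaryNotString("x"));
}
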
diff --git a/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
new file mode 100644
index 0000000000..f7d0db354d
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/liveness-analyzer-unittest.cc
@@ -0,0 +1,373 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/liveness-analyzer.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/state-values-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using testing::MakeMatcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::StringMatchResultListener;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LivenessAnalysisTest : public GraphTest {
+ public:
+ explicit LivenessAnalysisTest(int locals_count = 4)
+ : locals_count_(locals_count),
+ machine_(zone(), kRepWord32),
+ javascript_(zone()),
+ jsgraph_(isolate(), graph(), common(), &javascript_, &machine_),
+ analyzer_(locals_count, zone()),
+ empty_values_(graph()->NewNode(common()->StateValues(0), 0, nullptr)),
+ next_checkpoint_id_(0),
+ current_block_(nullptr) {}
+
+
+ protected:
+ JSGraph* jsgraph() { return &jsgraph_; }
+
+ LivenessAnalyzer* analyzer() { return &analyzer_; }
+ void Run() {
+ StateValuesCache cache(jsgraph());
+ NonLiveFrameStateSlotReplacer replacer(&cache,
+ jsgraph()->UndefinedConstant(),
+ analyzer()->local_count(), zone());
+ analyzer()->Run(&replacer);
+ }
+
+ Node* Checkpoint() {
+ int ast_num = next_checkpoint_id_++;
+ int first_const = intconst_from_bailout_id(ast_num, locals_count_);
+
+ const Operator* locals_op = common()->StateValues(locals_count_);
+
+ ZoneVector<Node*> local_inputs(locals_count_, nullptr, zone());
+ for (int i = 0; i < locals_count_; i++) {
+ local_inputs[i] = jsgraph()->Int32Constant(i + first_const);
+ }
+ Node* locals =
+ graph()->NewNode(locals_op, locals_count_, &local_inputs.front());
+
+ const Operator* op = common()->FrameState(
+ JS_FRAME, BailoutId(ast_num), OutputFrameStateCombine::Ignore());
+ Node* result = graph()->NewNode(op, empty_values_, locals, empty_values_,
+ jsgraph()->UndefinedConstant(),
+ jsgraph()->UndefinedConstant());
+
+ current_block_->Checkpoint(result);
+ return result;
+ }
+
+ void Bind(int var) { current_block()->Bind(var); }
+ void Lookup(int var) { current_block()->Lookup(var); }
+
+ class CheckpointMatcher : public MatcherInterface<Node*> {
+ public:
+ explicit CheckpointMatcher(const char* liveness, Node* empty_values,
+ int locals_count, Node* replacement)
+ : liveness_(liveness),
+ empty_values_(empty_values),
+ locals_count_(locals_count),
+ replacement_(replacement) {}
+
+ void DescribeTo(std::ostream* os) const OVERRIDE {
+ *os << "is a frame state with '" << liveness_
+ << "' liveness, empty "
+ "parameters and empty expression stack";
+ }
+
+ bool MatchAndExplain(Node* frame_state,
+ MatchResultListener* listener) const OVERRIDE {
+ if (frame_state == NULL) {
+ *listener << "which is NULL";
+ return false;
+ }
+ DCHECK(frame_state->opcode() == IrOpcode::kFrameState);
+
+ FrameStateCallInfo state_info =
+ OpParameter<FrameStateCallInfo>(frame_state);
+ int ast_num = state_info.bailout_id().ToInt();
+ int first_const = intconst_from_bailout_id(ast_num, locals_count_);
+
+ if (empty_values_ != frame_state->InputAt(0)) {
+ *listener << "whose parameters are " << frame_state->InputAt(0)
+ << " but should have been " << empty_values_ << " (empty)";
+ return false;
+ }
+ if (empty_values_ != frame_state->InputAt(2)) {
+ *listener << "whose expression stack is " << frame_state->InputAt(2)
+ << " but should have been " << empty_values_ << " (empty)";
+ return false;
+ }
+ StateValuesAccess locals(frame_state->InputAt(1));
+ if (locals_count_ != static_cast<int>(locals.size())) {
+ *listener << "whose number of locals is " << locals.size()
+ << " but should have been " << locals_count_;
+ return false;
+ }
+ int i = 0;
+ for (StateValuesAccess::TypedNode value : locals) {
+ if (liveness_[i] == 'L') {
+ StringMatchResultListener value_listener;
+ if (value.node == replacement_) {
+ *listener << "whose local #" << i << " was " << value.node->opcode()
+ << " but should have been 'undefined'";
+ return false;
+ } else if (!IsInt32Constant(first_const + i)
+ .MatchAndExplain(value.node, &value_listener)) {
+ *listener << "whose local #" << i << " does not match";
+ if (value_listener.str() != "") {
+ *listener << ", " << value_listener.str();
+ }
+ return false;
+ }
+ } else if (liveness_[i] == '.') {
+ if (value.node != replacement_) {
+ *listener << "whose local #" << i << " is " << value.node
+ << " but should have been " << replacement_
+ << " (undefined)";
+ return false;
+ }
+ } else {
+ UNREACHABLE();
+ }
+ i++;
+ }
+ return true;
+ }
+
+ private:
+ const char* liveness_;
+ Node* empty_values_;
+ int locals_count_;
+ Node* replacement_;
+ };
+
+ Matcher<Node*> IsCheckpointModuloLiveness(const char* liveness) {
+ return MakeMatcher(new CheckpointMatcher(liveness, empty_values_,
+ locals_count_,
+ jsgraph()->UndefinedConstant()));
+ }
+
+ LivenessAnalyzerBlock* current_block() { return current_block_; }
+ void set_current_block(LivenessAnalyzerBlock* block) {
+ current_block_ = block;
+ }
+
+ private:
+ static int intconst_from_bailout_id(int ast_num, int locals_count) {
+ return (locals_count + 1) * ast_num + 1;
+ }
+
+ int locals_count_;
+ MachineOperatorBuilder machine_;
+ JSOperatorBuilder javascript_;
+ JSGraph jsgraph_;
+ LivenessAnalyzer analyzer_;
+ Node* empty_values_;
+ int next_checkpoint_id_;
+ LivenessAnalyzerBlock* current_block_;
+};
+
+
+TEST_F(LivenessAnalysisTest, EmptyBlock) {
+ set_current_block(analyzer()->NewBlock());
+
+ Node* c1 = Checkpoint();
+
+ Run();
+
+ // Nothing is live.
+ EXPECT_THAT(c1, IsCheckpointModuloLiveness("...."));
+}
+
+
+TEST_F(LivenessAnalysisTest, SimpleLookup) {
+ set_current_block(analyzer()->NewBlock());
+
+ Node* c1 = Checkpoint();
+ Lookup(1);
+ Node* c2 = Checkpoint();
+
+ Run();
+
+ EXPECT_THAT(c1, IsCheckpointModuloLiveness(".L.."));
+ EXPECT_THAT(c2, IsCheckpointModuloLiveness("...."));
+}
+
+
+TEST_F(LivenessAnalysisTest, DiamondLookups) {
+ // Start block.
+ LivenessAnalyzerBlock* start = analyzer()->NewBlock();
+ set_current_block(start);
+ Node* c1_start = Checkpoint();
+
+ // First branch.
+ LivenessAnalyzerBlock* b1 = analyzer()->NewBlock(start);
+ set_current_block(b1);
+
+ Node* c1_b1 = Checkpoint();
+ Lookup(1);
+ Node* c2_b1 = Checkpoint();
+ Lookup(3);
+ Node* c3_b1 = Checkpoint();
+
+ // Second branch.
+ LivenessAnalyzerBlock* b2 = analyzer()->NewBlock(start);
+ set_current_block(b2);
+
+ Node* c1_b2 = Checkpoint();
+ Lookup(3);
+ Node* c2_b2 = Checkpoint();
+ Lookup(2);
+ Node* c3_b2 = Checkpoint();
+
+ // Merge block.
+ LivenessAnalyzerBlock* m = analyzer()->NewBlock(b1);
+ m->AddPredecessor(b2);
+ set_current_block(m);
+ Node* c1_m = Checkpoint();
+ Lookup(0);
+ Node* c2_m = Checkpoint();
+
+ Run();
+
+ EXPECT_THAT(c1_start, IsCheckpointModuloLiveness("LLLL"));
+
+ EXPECT_THAT(c1_b1, IsCheckpointModuloLiveness("LL.L"));
+ EXPECT_THAT(c2_b1, IsCheckpointModuloLiveness("L..L"));
+ EXPECT_THAT(c3_b1, IsCheckpointModuloLiveness("L..."));
+
+ EXPECT_THAT(c1_b2, IsCheckpointModuloLiveness("L.LL"));
+ EXPECT_THAT(c2_b2, IsCheckpointModuloLiveness("L.L."));
+ EXPECT_THAT(c3_b2, IsCheckpointModuloLiveness("L..."));
+
+ EXPECT_THAT(c1_m, IsCheckpointModuloLiveness("L..."));
+ EXPECT_THAT(c2_m, IsCheckpointModuloLiveness("...."));
+}
+
+
+TEST_F(LivenessAnalysisTest, DiamondLookupsAndBinds) {
+ // Start block.
+ LivenessAnalyzerBlock* start = analyzer()->NewBlock();
+ set_current_block(start);
+ Node* c1_start = Checkpoint();
+ Bind(0);
+ Node* c2_start = Checkpoint();
+
+ // First branch.
+ LivenessAnalyzerBlock* b1 = analyzer()->NewBlock(start);
+ set_current_block(b1);
+
+ Node* c1_b1 = Checkpoint();
+ Bind(2);
+ Bind(1);
+ Node* c2_b1 = Checkpoint();
+ Bind(3);
+ Node* c3_b1 = Checkpoint();
+
+ // Second branch.
+ LivenessAnalyzerBlock* b2 = analyzer()->NewBlock(start);
+ set_current_block(b2);
+
+ Node* c1_b2 = Checkpoint();
+ Lookup(2);
+ Node* c2_b2 = Checkpoint();
+ Bind(2);
+ Bind(3);
+ Node* c3_b2 = Checkpoint();
+
+ // Merge block.
+ LivenessAnalyzerBlock* m = analyzer()->NewBlock(b1);
+ m->AddPredecessor(b2);
+ set_current_block(m);
+ Node* c1_m = Checkpoint();
+ Lookup(0);
+ Lookup(1);
+ Lookup(2);
+ Lookup(3);
+ Node* c2_m = Checkpoint();
+
+ Run();
+
+ EXPECT_THAT(c1_start, IsCheckpointModuloLiveness(".LL."));
+ EXPECT_THAT(c2_start, IsCheckpointModuloLiveness("LLL."));
+
+ EXPECT_THAT(c1_b1, IsCheckpointModuloLiveness("L..."));
+ EXPECT_THAT(c2_b1, IsCheckpointModuloLiveness("LLL."));
+ EXPECT_THAT(c3_b1, IsCheckpointModuloLiveness("LLLL"));
+
+ EXPECT_THAT(c1_b2, IsCheckpointModuloLiveness("LLL."));
+ EXPECT_THAT(c2_b2, IsCheckpointModuloLiveness("LL.."));
+ EXPECT_THAT(c3_b2, IsCheckpointModuloLiveness("LLLL"));
+
+ EXPECT_THAT(c1_m, IsCheckpointModuloLiveness("LLLL"));
+ EXPECT_THAT(c2_m, IsCheckpointModuloLiveness("...."));
+}
+
+
+TEST_F(LivenessAnalysisTest, SimpleLoop) {
+ // Start block.
+ LivenessAnalyzerBlock* start = analyzer()->NewBlock();
+ set_current_block(start);
+ Node* c1_start = Checkpoint();
+ Bind(0);
+ Bind(1);
+ Bind(2);
+ Bind(3);
+ Node* c2_start = Checkpoint();
+
+ // Loop header block.
+ LivenessAnalyzerBlock* header = analyzer()->NewBlock(start);
+ set_current_block(header);
+ Node* c1_header = Checkpoint();
+ Lookup(0);
+ Bind(2);
+ Node* c2_header = Checkpoint();
+
+ // Inside-loop block.
+ LivenessAnalyzerBlock* in_loop = analyzer()->NewBlock(header);
+ set_current_block(in_loop);
+ Node* c1_in_loop = Checkpoint();
+ Bind(0);
+ Lookup(3);
+ Node* c2_in_loop = Checkpoint();
+
+ // Add back edge.
+ header->AddPredecessor(in_loop);
+
+ // After-loop block.
+ LivenessAnalyzerBlock* end = analyzer()->NewBlock(header);
+ set_current_block(end);
+ Node* c1_end = Checkpoint();
+ Lookup(1);
+ Lookup(2);
+ Node* c2_end = Checkpoint();
+
+ Run();
+
+ EXPECT_THAT(c1_start, IsCheckpointModuloLiveness("...."));
+ EXPECT_THAT(c2_start, IsCheckpointModuloLiveness("LL.L"));
+
+ EXPECT_THAT(c1_header, IsCheckpointModuloLiveness("LL.L"));
+ EXPECT_THAT(c2_header, IsCheckpointModuloLiveness(".LLL"));
+
+ EXPECT_THAT(c1_in_loop, IsCheckpointModuloLiveness(".L.L"));
+ EXPECT_THAT(c2_in_loop, IsCheckpointModuloLiveness("LL.L"));
+
+ EXPECT_THAT(c1_end, IsCheckpointModuloLiveness(".LL."));
+ EXPECT_THAT(c2_end, IsCheckpointModuloLiveness("...."));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
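For reference, the encoding that CheckpointMatcher verifies can be restated as a short standalone sketch. ExpectedLocalConstants is a hypothetical helper, not part of the patch: checkpoint number ast_num fills its locals with consecutive constants starting at (locals_count + 1) * ast_num + 1, and a liveness string such as ".L.." asserts that local #1 still holds its constant while the dotted slots were rewritten to the undefined constant.

    #include <vector>

    // Constants that checkpoint `ast_num` is expected to carry in its locals.
    std::vector<int> ExpectedLocalConstants(int ast_num, int locals_count) {
      int first_const = (locals_count + 1) * ast_num + 1;
      std::vector<int> constants(locals_count);
      for (int i = 0; i < locals_count; ++i) constants[i] = first_const + i;
      return constants;  // ast_num = 1, locals_count = 4 -> {6, 7, 8, 9}
    }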
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index f63e70da5a..11c679cb29 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -233,6 +233,27 @@ const uint32_t kUint32Values[] = {
0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
+
+struct ComparisonBinaryOperator {
+ const Operator* (MachineOperatorBuilder::*constructor)();
+ const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os,
+ ComparisonBinaryOperator const& cbop) {
+ return os << cbop.constructor_name;
+}
+
+
+const ComparisonBinaryOperator kComparisonBinaryOperators[] = {
+#define OPCODE(Opcode) \
+ { &MachineOperatorBuilder::Opcode, #Opcode } \
+ ,
+ MACHINE_COMPARE_BINOP_LIST(OPCODE)
+#undef OPCODE
+};
+
} // namespace
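The kComparisonBinaryOperators table above is generated with V8's X-macro lists: MACHINE_COMPARE_BINOP_LIST(V) invokes V(Opcode) once per machine comparison operator. A minimal sketch of the pattern, with MY_OP_LIST standing in as a hypothetical list macro:

    #define MY_OP_LIST(V) V(Equal) V(LessThan) V(LessThanOrEqual)

    struct NamedOp {
      const char* name;
    };

    const NamedOp kOps[] = {
    #define OPCODE(Name) {#Name},
        MY_OP_LIST(OPCODE)
    #undef OPCODE
    };  // expands to {"Equal"}, {"LessThan"}, {"LessThanOrEqual"}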
@@ -595,6 +616,33 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithInt32AddAndConstant) {
}
+TEST_F(MachineOperatorReducerTest, Word32AndWithInt32MulAndConstant) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FORRANGE(int32_t, l, 1, 31) {
+ TRACED_FOREACH(int32_t, k, kInt32Values) {
+ if ((k << l) == 0) continue;
+
+ // (x * (K << L)) & (-1 << L) => x * (K << L)
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Int32Mul(), p0, Int32Constant(k << l)),
+ Int32Constant(-1 << l)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsInt32Mul(p0, IsInt32Constant(k << l)));
+
+ // ((K << L) * x) & (-1 << L) => x * (K << L)
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Int32Mul(), Int32Constant(k << l), p0),
+ Int32Constant(-1 << l)));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsInt32Mul(p0, IsInt32Constant(k << l)));
+ }
+ }
+}
+
+
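The reduction exercised here is sound because x * (K << L) is a multiple of 2^L, so its low L bits are already zero and the mask -1 << L removes nothing. A quick sanity check of the identity in plain C++ (a sketch, not V8 code; unsigned arithmetic keeps the wraparound well defined):

    #include <cassert>
    #include <cstdint>

    // Holds for any x and k and any shift 0 <= l < 32.
    void CheckMulAndMask(uint32_t x, uint32_t k, unsigned l) {
      uint32_t product = x * (k << l);  // a multiple of 2^l, even mod 2^32
      uint32_t mask = ~0u << l;         // all ones above bit l-1
      assert((product & mask) == product);
    }
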
TEST_F(MachineOperatorReducerTest,
Word32AndWithInt32AddAndInt32MulAndConstant) {
Node* const p0 = Parameter(0);
@@ -632,6 +680,27 @@ TEST_F(MachineOperatorReducerTest,
}
+TEST_F(MachineOperatorReducerTest, Word32AndWithComparisonAndConstantOne) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ TRACED_FOREACH(ComparisonBinaryOperator, cbop, kComparisonBinaryOperators) {
+ Node* cmp = graph()->NewNode((machine()->*cbop.constructor)(), p0, p1);
+
+ // cmp & 1 => cmp
+ Reduction const r1 =
+ Reduce(graph()->NewNode(machine()->Word32And(), cmp, Int32Constant(1)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(cmp, r1.replacement());
+
+ // 1 & cmp => cmp
+ Reduction const r2 =
+ Reduce(graph()->NewNode(machine()->Word32And(), Int32Constant(1), cmp));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(cmp, r2.replacement());
+ }
+}
+
+
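The cmp & 1 => cmp rewrite tested above holds because machine comparisons produce exactly 0 or 1; masking either value with 1 is an identity, as this trivial sketch (not V8 code) spells out:

    #include <cassert>
    #include <cstdint>

    void CheckAndWithOne(uint32_t cmp) {
      assert(cmp == 0 || cmp == 1);  // precondition: a comparison result
      assert((cmp & 1) == cmp);
      assert((1 & cmp) == cmp);
    }
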
// -----------------------------------------------------------------------------
// Word32Xor
@@ -773,6 +842,24 @@ TEST_F(MachineOperatorReducerTest, Word32RorWithConstants) {
// Word32Sar
+TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndComparison) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+
+ TRACED_FOREACH(ComparisonBinaryOperator, cbop, kComparisonBinaryOperators) {
+ Node* cmp = graph()->NewNode((machine()->*cbop.constructor)(), p0, p1);
+
+ // cmp << 31 >> 31 => 0 - cmp
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Word32Sar(),
+ graph()->NewNode(machine()->Word32Shl(), cmp, Int32Constant(31)),
+ Int32Constant(31)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Sub(IsInt32Constant(0), cmp));
+ }
+}
+
+
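The cmp << 31 >> 31 => 0 - cmp rewrite works because the comparison result is 0 or 1: the left shift moves that single bit into the sign position and the arithmetic right shift replicates it across the word, giving 0 or -1. A sketch of the identity (assumes the usual arithmetic right shift of negative values, which V8's targets provide):

    #include <cassert>
    #include <cstdint>

    int32_t SignSmear(int32_t cmp) {
      // Shift through uint32_t to sidestep signed-overflow UB on the left shift.
      return static_cast<int32_t>(static_cast<uint32_t>(cmp) << 31) >> 31;
    }

    void CheckSignSmear() {
      assert(SignSmear(0) == 0 - 0);  // 0
      assert(SignSmear(1) == 0 - 1);  // -1, all bits set
    }
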
TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
Node* const p0 = Parameter(0);
Node* const p1 = Parameter(1);
@@ -1190,6 +1277,28 @@ TEST_F(MachineOperatorReducerTest, Uint32ModWithParameters) {
// -----------------------------------------------------------------------------
+// Int32Add
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithInt32SubWithConstantZero) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Int32Add(),
+ graph()->NewNode(machine()->Int32Sub(), Int32Constant(0), p0), p1));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsInt32Sub(p1, p0));
+
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Int32Add(), p0,
+ graph()->NewNode(machine()->Int32Sub(), Int32Constant(0), p1)));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsInt32Sub(p0, p1));
+}
+
+
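The new Int32Add cases fold an added negation into a subtraction. Modulo 2^32 the identities (0 - x) + y == y - x and x + (0 - y) == x - y hold unconditionally, as this unsigned sketch (not V8 code) checks:

    #include <cassert>
    #include <cstdint>

    void CheckAddOfNegation(uint32_t x, uint32_t y) {
      assert((0 - x) + y == y - x);
      assert(x + (0 - y) == x - y);
    }
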
+// -----------------------------------------------------------------------------
// Int32AddWithOverflow
@@ -1327,6 +1436,46 @@ TEST_F(MachineOperatorReducerTest, Float64MulWithMinusOne) {
// -----------------------------------------------------------------------------
+// Float64InsertLowWord32
+
+
+TEST_F(MachineOperatorReducerTest, Float64InsertLowWord32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(uint32_t, y, kUint32Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64InsertLowWord32(),
+ Float64Constant(x), Uint32Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(BitEq(bit_cast<double>(
+ (bit_cast<uint64_t>(x) & V8_UINT64_C(0xFFFFFFFF00000000)) | y))));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Float64InsertHighWord32
+
+
+TEST_F(MachineOperatorReducerTest, Float64InsertHighWord32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(uint32_t, y, kUint32Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64InsertHighWord32(),
+ Float64Constant(x), Uint32Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(BitEq(bit_cast<double>(
+ (bit_cast<uint64_t>(x) & V8_UINT64_C(0xFFFFFFFF)) |
+ (static_cast<uint64_t>(y) << 32)))));
+ }
+ }
+}
+
+
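Both expected constant folds splice a 32-bit word into one half of the double's bit image. A portable sketch of the arithmetic using memcpy in place of V8's bit_cast helper (a sketch, not the reducer's actual code):

    #include <cstdint>
    #include <cstring>

    double InsertLowWord32(double x, uint32_t y) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits = (bits & 0xFFFFFFFF00000000ull) | y;  // replace the low 32 bits
      std::memcpy(&x, &bits, sizeof x);
      return x;
    }

    double InsertHighWord32(double x, uint32_t y) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);
      bits = (bits & 0xFFFFFFFFull) | (static_cast<uint64_t>(y) << 32);
      std::memcpy(&x, &bits, sizeof x);
      return x;  // high 32 bits replaced by y
    }
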
+// -----------------------------------------------------------------------------
// Store
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index 6e0df2ab44..71b3c0edd9 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
@@ -180,24 +180,24 @@ const PureOperator kPureOperators[] = {
PURE(Word32And, 2, 0, 1), PURE(Word32Or, 2, 0, 1), PURE(Word32Xor, 2, 0, 1),
PURE(Word32Shl, 2, 0, 1), PURE(Word32Shr, 2, 0, 1),
PURE(Word32Sar, 2, 0, 1), PURE(Word32Ror, 2, 0, 1),
- PURE(Word32Equal, 2, 0, 1), PURE(Word64And, 2, 0, 1),
- PURE(Word64Or, 2, 0, 1), PURE(Word64Xor, 2, 0, 1), PURE(Word64Shl, 2, 0, 1),
- PURE(Word64Shr, 2, 0, 1), PURE(Word64Sar, 2, 0, 1),
- PURE(Word64Ror, 2, 0, 1), PURE(Word64Equal, 2, 0, 1),
- PURE(Int32Add, 2, 0, 1), PURE(Int32AddWithOverflow, 2, 0, 2),
- PURE(Int32Sub, 2, 0, 1), PURE(Int32SubWithOverflow, 2, 0, 2),
- PURE(Int32Mul, 2, 0, 1), PURE(Int32MulHigh, 2, 0, 1),
- PURE(Int32Div, 2, 1, 1), PURE(Uint32Div, 2, 1, 1), PURE(Int32Mod, 2, 1, 1),
- PURE(Uint32Mod, 2, 1, 1), PURE(Int32LessThan, 2, 0, 1),
- PURE(Int32LessThanOrEqual, 2, 0, 1), PURE(Uint32LessThan, 2, 0, 1),
- PURE(Uint32LessThanOrEqual, 2, 0, 1), PURE(Int64Add, 2, 0, 1),
- PURE(Int64Sub, 2, 0, 1), PURE(Int64Mul, 2, 0, 1), PURE(Int64Div, 2, 0, 1),
- PURE(Uint64Div, 2, 0, 1), PURE(Int64Mod, 2, 0, 1), PURE(Uint64Mod, 2, 0, 1),
- PURE(Int64LessThan, 2, 0, 1), PURE(Int64LessThanOrEqual, 2, 0, 1),
- PURE(Uint64LessThan, 2, 0, 1), PURE(ChangeFloat32ToFloat64, 1, 0, 1),
- PURE(ChangeFloat64ToInt32, 1, 0, 1), PURE(ChangeFloat64ToUint32, 1, 0, 1),
- PURE(ChangeInt32ToInt64, 1, 0, 1), PURE(ChangeUint32ToFloat64, 1, 0, 1),
- PURE(ChangeUint32ToUint64, 1, 0, 1),
+ PURE(Word32Equal, 2, 0, 1), PURE(Word32Clz, 1, 0, 1),
+ PURE(Word64And, 2, 0, 1), PURE(Word64Or, 2, 0, 1), PURE(Word64Xor, 2, 0, 1),
+ PURE(Word64Shl, 2, 0, 1), PURE(Word64Shr, 2, 0, 1),
+ PURE(Word64Sar, 2, 0, 1), PURE(Word64Ror, 2, 0, 1),
+ PURE(Word64Equal, 2, 0, 1), PURE(Int32Add, 2, 0, 1),
+ PURE(Int32AddWithOverflow, 2, 0, 2), PURE(Int32Sub, 2, 0, 1),
+ PURE(Int32SubWithOverflow, 2, 0, 2), PURE(Int32Mul, 2, 0, 1),
+ PURE(Int32MulHigh, 2, 0, 1), PURE(Int32Div, 2, 1, 1),
+ PURE(Uint32Div, 2, 1, 1), PURE(Int32Mod, 2, 1, 1), PURE(Uint32Mod, 2, 1, 1),
+ PURE(Int32LessThan, 2, 0, 1), PURE(Int32LessThanOrEqual, 2, 0, 1),
+ PURE(Uint32LessThan, 2, 0, 1), PURE(Uint32LessThanOrEqual, 2, 0, 1),
+ PURE(Int64Add, 2, 0, 1), PURE(Int64Sub, 2, 0, 1), PURE(Int64Mul, 2, 0, 1),
+ PURE(Int64Div, 2, 0, 1), PURE(Uint64Div, 2, 0, 1), PURE(Int64Mod, 2, 0, 1),
+ PURE(Uint64Mod, 2, 0, 1), PURE(Int64LessThan, 2, 0, 1),
+ PURE(Int64LessThanOrEqual, 2, 0, 1), PURE(Uint64LessThan, 2, 0, 1),
+ PURE(ChangeFloat32ToFloat64, 1, 0, 1), PURE(ChangeFloat64ToInt32, 1, 0, 1),
+ PURE(ChangeFloat64ToUint32, 1, 0, 1), PURE(ChangeInt32ToInt64, 1, 0, 1),
+ PURE(ChangeUint32ToFloat64, 1, 0, 1), PURE(ChangeUint32ToUint64, 1, 0, 1),
PURE(TruncateFloat64ToFloat32, 1, 0, 1),
PURE(TruncateFloat64ToInt32, 1, 0, 1), PURE(TruncateInt64ToInt32, 1, 0, 1),
PURE(Float64Add, 2, 0, 1), PURE(Float64Sub, 2, 0, 1),
@@ -205,8 +205,12 @@ const PureOperator kPureOperators[] = {
PURE(Float64Mod, 2, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
PURE(Float64Equal, 2, 0, 1), PURE(Float64LessThan, 2, 0, 1),
PURE(Float64LessThanOrEqual, 2, 0, 1), PURE(LoadStackPointer, 0, 0, 1),
- PURE(Float64Floor, 1, 0, 1), PURE(Float64Ceil, 1, 0, 1),
- PURE(Float64RoundTruncate, 1, 0, 1), PURE(Float64RoundTiesAway, 1, 0, 1)
+ PURE(Float64RoundDown, 1, 0, 1), PURE(Float64RoundTruncate, 1, 0, 1),
+ PURE(Float64RoundTiesAway, 1, 0, 1), PURE(Float64ExtractLowWord32, 1, 0, 1),
+ PURE(Float64ExtractHighWord32, 1, 0, 1),
+ PURE(Float64InsertLowWord32, 2, 0, 1),
+ PURE(Float64InsertHighWord32, 2, 0, 1), PURE(Float64Max, 2, 0, 1),
+ PURE(Float64Min, 2, 0, 1)
#undef PURE
};
diff --git a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index efe26d22b4..bafa89d581 100644
--- a/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -800,6 +800,21 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsClz, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
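The new test checks that Word32Clz selects into the single MIPS clz instruction. For reference, a portable sketch of the operation itself, counting leading zero bits with clz(0) == 32 (not V8 code):

    #include <cstdint>

    int Word32Clz(uint32_t value) {
      int count = 0;
      for (uint32_t bit = 0x80000000u; bit != 0 && (value & bit) == 0;
           bit >>= 1) {
        ++count;
      }
      return count;  // e.g. Word32Clz(1) == 31, Word32Clz(0) == 32
    }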
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index 41453337f2..0953de8c40 100644
--- a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -166,29 +166,28 @@ const IntCmp kCmpInstructions[] = {
{{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMips64Cmp,
kMachInt64},
1U},
- {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp32,
- kMachInt32},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp, kMachInt32},
1U},
- {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMips64Cmp32,
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMips64Cmp,
kMachInt32},
1U},
- {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp32,
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp,
kMachInt32},
1U},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kMips64Cmp32, kMachInt32},
+ kMips64Cmp, kMachInt32},
1U},
- {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMips64Cmp32,
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMips64Cmp,
kMachInt32},
1U},
{{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
- kMips64Cmp32, kMachInt32},
+ kMips64Cmp, kMachInt32},
1U},
- {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp32,
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp,
kMachUint32},
1U},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kMips64Cmp32, kMachUint32},
+ kMips64Cmp, kMachUint32},
1U}};
@@ -753,7 +752,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -765,7 +764,7 @@ TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -802,6 +801,21 @@ TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Clz, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
index b8375fab10..da887fe88a 100644
--- a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
@@ -124,6 +124,59 @@ TEST_F(MoveOptimizerTest, SplitsConstants) {
CHECK(Contains(move, Reg(0), Slot(2)));
}
+
+TEST_F(MoveOptimizerTest, SimpleMerge) {
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+ AddMove(LastGap(), Reg(0), Reg(1));
+
+ StartBlock();
+ EndBlock(Jump(1));
+ AddMove(LastGap(), Reg(0), Reg(1));
+
+ StartBlock();
+ EndBlock(Last());
+
+ Optimize();
+
+ auto move = LastGap()->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(move));
+ CHECK(Contains(move, Reg(0), Reg(1)));
+}
+
+
+TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+ auto gap_0 = LastGap();
+ AddMove(gap_0, Reg(0), Reg(1));
+ AddMove(LastGap(), Reg(1), Reg(0));
+
+ StartBlock();
+ EndBlock(Jump(1));
+ auto gap_1 = LastGap();
+ AddMove(gap_1, Reg(0), Reg(1));
+ AddMove(gap_1, Reg(1), Reg(0));
+
+ StartBlock();
+ EndBlock(Last());
+
+ Optimize();
+
+ CHECK(gap_0->IsRedundant());
+ CHECK(gap_1->IsRedundant());
+ auto move = LastGap()->parallel_moves()[0];
+ CHECK_EQ(2, NonRedundantSize(move));
+ CHECK(Contains(move, Reg(0), Reg(1)));
+ CHECK(Contains(move, Reg(1), Reg(0)));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-properties-unittest.cc b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
index bb471bd01e..2bec4faf4d 100644
--- a/deps/v8/test/unittests/compiler/node-properties-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-properties-unittest.cc
@@ -8,7 +8,9 @@
#include "testing/gmock/include/gmock/gmock.h"
using testing::AnyOf;
+using testing::ElementsAre;
using testing::IsNull;
+using testing::UnorderedElementsAre;
namespace v8 {
namespace internal {
@@ -17,6 +19,64 @@ namespace compiler {
typedef TestWithZone NodePropertiesTest;
+namespace {
+
+const Operator kMockOperator(IrOpcode::kDead, Operator::kNoProperties,
+ "MockOperator", 0, 0, 0, 1, 0, 0);
+const Operator kMockOpEffect(IrOpcode::kDead, Operator::kNoProperties,
+ "MockOpEffect", 0, 1, 0, 1, 1, 0);
+const Operator kMockOpControl(IrOpcode::kDead, Operator::kNoProperties,
+ "MockOpControl", 0, 0, 1, 1, 0, 1);
+const Operator kMockCallOperator(IrOpcode::kCall, Operator::kNoProperties,
+ "MockCallOperator", 0, 0, 0, 0, 0, 2);
+
+} // namespace
+
+
+TEST_F(NodePropertiesTest, ReplaceWithValue_ValueUse) {
+ CommonOperatorBuilder common(zone());
+ Node* node = Node::New(zone(), 0, &kMockOperator, 0, nullptr, false);
+ Node* use_value = Node::New(zone(), 0, common.Return(), 1, &node, false);
+ Node* replacement = Node::New(zone(), 0, &kMockOperator, 0, nullptr, false);
+ NodeProperties::ReplaceWithValue(node, replacement);
+ EXPECT_EQ(replacement, use_value->InputAt(0));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(1, replacement->UseCount());
+ EXPECT_THAT(replacement->uses(), ElementsAre(use_value));
+}
+
+
+TEST_F(NodePropertiesTest, ReplaceWithValue_EffectUse) {
+ CommonOperatorBuilder common(zone());
+ Node* start = Node::New(zone(), 0, common.Start(1), 0, nullptr, false);
+ Node* node = Node::New(zone(), 0, &kMockOpEffect, 1, &start, false);
+ Node* use_effect = Node::New(zone(), 0, common.EffectPhi(1), 1, &node, false);
+ Node* replacement = Node::New(zone(), 0, &kMockOperator, 0, nullptr, false);
+ NodeProperties::ReplaceWithValue(node, replacement);
+ EXPECT_EQ(start, use_effect->InputAt(0));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(2, start->UseCount());
+ EXPECT_EQ(0, replacement->UseCount());
+ EXPECT_THAT(start->uses(), UnorderedElementsAre(use_effect, node));
+}
+
+
+TEST_F(NodePropertiesTest, ReplaceWithValue_ControlUse) {
+ CommonOperatorBuilder common(zone());
+ Node* start = Node::New(zone(), 0, common.Start(1), 0, nullptr, false);
+ Node* node = Node::New(zone(), 0, &kMockOpControl, 1, &start, false);
+ Node* success = Node::New(zone(), 0, common.IfSuccess(), 1, &node, false);
+ Node* use_control = Node::New(zone(), 0, common.Merge(1), 1, &success, false);
+ Node* replacement = Node::New(zone(), 0, &kMockOperator, 0, nullptr, false);
+ NodeProperties::ReplaceWithValue(node, replacement);
+ EXPECT_EQ(start, use_control->InputAt(0));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(2, start->UseCount());
+ EXPECT_EQ(0, replacement->UseCount());
+ EXPECT_THAT(start->uses(), UnorderedElementsAre(use_control, node));
+}
+
+
TEST_F(NodePropertiesTest, FindProjection) {
CommonOperatorBuilder common(zone());
Node* start = Node::New(zone(), 0, common.Start(1), 0, nullptr, false);
@@ -41,6 +101,18 @@ TEST_F(NodePropertiesTest, CollectControlProjections_Branch) {
}
+TEST_F(NodePropertiesTest, CollectControlProjections_Call) {
+ Node* result[2];
+ CommonOperatorBuilder common(zone());
+ Node* call = Node::New(zone(), 1, &kMockCallOperator, 0, nullptr, false);
+ Node* if_ex = Node::New(zone(), 2, common.IfException(), 1, &call, false);
+ Node* if_ok = Node::New(zone(), 3, common.IfSuccess(), 1, &call, false);
+ NodeProperties::CollectControlProjections(call, result, arraysize(result));
+ EXPECT_EQ(if_ok, result[0]);
+ EXPECT_EQ(if_ex, result[1]);
+}
+
+
TEST_F(NodePropertiesTest, CollectControlProjections_Switch) {
Node* result[3];
CommonOperatorBuilder common(zone());
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index eccc96227e..5890b49b02 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -543,11 +543,23 @@ class IsEffectSetMatcher FINAL : public NodeMatcher {
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 0),
- "effect0", effect0_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 1),
- "effect1", effect1_matcher_, listener));
+ if (!NodeMatcher::MatchAndExplain(node, listener)) return false;
+
+ Node* effect0 = NodeProperties::GetEffectInput(node, 0);
+ Node* effect1 = NodeProperties::GetEffectInput(node, 1);
+
+ {
+ // Try matching in the reverse order first.
+ StringMatchResultListener value_listener;
+ if (effect0_matcher_.MatchAndExplain(effect1, &value_listener) &&
+ effect1_matcher_.MatchAndExplain(effect0, &value_listener)) {
+ return true;
+ }
+ }
+
+ return PrintMatchAndExplain(effect0, "effect0", effect0_matcher_,
+ listener) &&
+ PrintMatchAndExplain(effect1, "effect1", effect1_matcher_, listener);
}
private:
@@ -1303,6 +1315,12 @@ Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher) {
}
+Matcher<Node*> IsIfSuccess(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsControl1Matcher(IrOpcode::kIfSuccess, control_matcher));
+}
+
+
Matcher<Node*> IsSwitch(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher) {
return MakeMatcher(new IsSwitchMatcher(value_matcher, control_matcher));
@@ -1584,7 +1602,11 @@ IS_BINOP_MATCHER(Int32MulHigh)
IS_BINOP_MATCHER(Int32LessThan)
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+IS_BINOP_MATCHER(Float64Max)
+IS_BINOP_MATCHER(Float64Min)
IS_BINOP_MATCHER(Float64Sub)
+IS_BINOP_MATCHER(Float64InsertLowWord32)
+IS_BINOP_MATCHER(Float64InsertHighWord32)
#undef IS_BINOP_MATCHER
@@ -1592,7 +1614,6 @@ IS_BINOP_MATCHER(Float64Sub)
Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) { \
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
-IS_UNOP_MATCHER(AnyToBoolean)
IS_UNOP_MATCHER(BooleanNot)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToUint32)
@@ -1604,14 +1625,16 @@ IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
IS_UNOP_MATCHER(Float64Sqrt)
-IS_UNOP_MATCHER(Float64Floor)
-IS_UNOP_MATCHER(Float64Ceil)
+IS_UNOP_MATCHER(Float64RoundDown)
IS_UNOP_MATCHER(Float64RoundTruncate)
IS_UNOP_MATCHER(Float64RoundTiesAway)
+IS_UNOP_MATCHER(Float64ExtractLowWord32)
+IS_UNOP_MATCHER(Float64ExtractHighWord32)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
IS_UNOP_MATCHER(ObjectIsSmi)
IS_UNOP_MATCHER(ObjectIsNonNegativeSmi)
+IS_UNOP_MATCHER(Word32Clz)
#undef IS_UNOP_MATCHER
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 03011972b7..7c306a2c9d 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -47,6 +47,7 @@ Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control2_matcher);
Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfSuccess(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsSwitch(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfValue(const Matcher<int32_t>& value_matcher,
@@ -100,7 +101,6 @@ Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsAnyToBoolean(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsNumberEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
@@ -169,6 +169,7 @@ Matcher<Node*> IsWord32Ror(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Clz(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
@@ -202,13 +203,22 @@ Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Max(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundDown(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64ExtractLowWord32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64ExtractHighWord32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64InsertLowWord32(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64InsertHighWord32(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
const Matcher<Node*>& context_matcher,
const Matcher<Node*>& effect_matcher,
diff --git a/deps/v8/test/unittests/compiler/node-unittest.cc b/deps/v8/test/unittests/compiler/node-unittest.cc
index f56d7d6f8c..1a6c1bdf37 100644
--- a/deps/v8/test/unittests/compiler/node-unittest.cc
+++ b/deps/v8/test/unittests/compiler/node-unittest.cc
@@ -48,15 +48,15 @@ TEST_F(NodeTest, NewWithInputs) {
EXPECT_EQ(0, n0->InputCount());
Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
EXPECT_EQ(1, n0->UseCount());
- EXPECT_EQ(n1, n0->UseAt(0));
+ EXPECT_THAT(n0->uses(), UnorderedElementsAre(n1));
EXPECT_EQ(0, n1->UseCount());
EXPECT_EQ(1, n1->InputCount());
EXPECT_EQ(n0, n1->InputAt(0));
Node* n0_n1[] = {n0, n1};
Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
EXPECT_EQ(2, n0->UseCount());
- EXPECT_EQ(n1, n0->UseAt(0));
- EXPECT_EQ(n2, n0->UseAt(1));
+ EXPECT_THAT(n0->uses(), UnorderedElementsAre(n1, n2));
+ EXPECT_THAT(n1->uses(), UnorderedElementsAre(n2));
EXPECT_EQ(2, n2->InputCount());
EXPECT_EQ(n0, n2->InputAt(0));
EXPECT_EQ(n1, n2->InputAt(1));
diff --git a/deps/v8/test/unittests/compiler/opcodes-unittest.cc b/deps/v8/test/unittests/compiler/opcodes-unittest.cc
index ca79e8ac8b..3bb65c2e13 100644
--- a/deps/v8/test/unittests/compiler/opcodes-unittest.cc
+++ b/deps/v8/test/unittests/compiler/opcodes-unittest.cc
@@ -64,6 +64,21 @@ bool IsConstantOpcode(IrOpcode::Value opcode) {
}
+bool IsComparisonOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ JS_COMPARE_BINOP_LIST(OPCODE)
+ SIMPLIFIED_COMPARE_BINOP_LIST(OPCODE)
+ MACHINE_COMPARE_BINOP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
const IrOpcode::Value kInvalidOpcode = static_cast<IrOpcode::Value>(123456789);
} // namespace
@@ -109,6 +124,16 @@ TEST(IrOpcodeTest, IsConstantOpcode) {
}
+TEST(IrOpcodeTest, IsComparisonOpcode) {
+ EXPECT_FALSE(IrOpcode::IsComparisonOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsComparisonOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsComparisonOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
TEST(IrOpcodeTest, Mnemonic) {
EXPECT_STREQ("UnknownOpcode", IrOpcode::Mnemonic(kInvalidOpcode));
#define OPCODE(Opcode) \
diff --git a/deps/v8/test/unittests/compiler/ppc/OWNERS b/deps/v8/test/unittests/compiler/ppc/OWNERS
new file mode 100644
index 0000000000..beecb3d0b1
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/ppc/OWNERS
@@ -0,0 +1,3 @@
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
index c82cc3733e..873b4ecd2a 100644
--- a/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/register-allocator-unittest.cc
@@ -301,6 +301,31 @@ TEST_F(RegisterAllocatorTest, SplitBeforeInstruction) {
}
+TEST_F(RegisterAllocatorTest, SplitBeforeInstruction2) {
+ const int kNumRegs = 6;
+ SetNumRegs(kNumRegs, kNumRegs);
+
+ StartBlock();
+
+ // Stack parameters/spilled values.
+ auto p_0 = Define(Slot(-1));
+ auto p_1 = Define(Slot(-2));
+
+ // Fill registers.
+ VReg values[kNumRegs];
+ for (size_t i = 0; i < arraysize(values); ++i) {
+ values[i] = Define(Reg(static_cast<int>(i)));
+ }
+
+ // values[0] and [1] will be split in the second half of this instruction.
+ EmitOOI(Reg(0), Reg(1), Reg(p_0, 0), Reg(p_1, 1));
+ EmitI(Reg(values[0]), Reg(values[1]));
+ EndBlock(Last());
+
+ Allocate();
+}
+
+
TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
// Outer diamond.
StartBlock();
@@ -466,6 +491,87 @@ TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
Allocate();
}
+
+namespace {
+
+enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
+
+const ParameterType kParameterTypes[] = {
+ ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister,
+ ParameterType::kFixedRegister};
+
+class SlotConstraintTest : public RegisterAllocatorTest,
+ public ::testing::WithParamInterface<
+ ::testing::tuple<ParameterType, int>> {
+ public:
+ static const int kMaxVariant = 5;
+
+ protected:
+ ParameterType parameter_type() const {
+ return ::testing::get<0>(B::GetParam());
+ }
+ int variant() const { return ::testing::get<1>(B::GetParam()); }
+
+ private:
+ typedef ::testing::WithParamInterface<::testing::tuple<ParameterType, int>> B;
+};
+}
+
+
+#if GTEST_HAS_COMBINE
+
+TEST_P(SlotConstraintTest, SlotConstraint) {
+ StartBlock();
+ VReg p_0;
+ switch (parameter_type()) {
+ case ParameterType::kFixedSlot:
+ p_0 = Parameter(Slot(-1));
+ break;
+ case ParameterType::kSlot:
+ p_0 = Parameter(Slot(-1));
+ break;
+ case ParameterType::kRegister:
+ p_0 = Parameter(Reg());
+ break;
+ case ParameterType::kFixedRegister:
+ p_0 = Parameter(Reg(1));
+ break;
+ }
+ switch (variant()) {
+ case 0:
+ EmitI(Slot(p_0), Reg(p_0));
+ break;
+ case 1:
+ EmitI(Slot(p_0));
+ break;
+ case 2:
+ EmitI(Reg(p_0));
+ EmitI(Slot(p_0));
+ break;
+ case 3:
+ EmitI(Slot(p_0));
+ EmitI(Reg(p_0));
+ break;
+ case 4:
+ EmitI(Slot(p_0, -1), Slot(p_0), Reg(p_0), Reg(p_0, 1));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ EndBlock(Last());
+
+ Allocate();
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+ RegisterAllocatorTest, SlotConstraintTest,
+ ::testing::Combine(::testing::ValuesIn(kParameterTypes),
+ ::testing::Range(0, SlotConstraintTest::kMaxVariant)));
+
+#endif // GTEST_HAS_COMBINE
+
} // namespace compiler
} // namespace internal
} // namespace v8
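SlotConstraintTest above uses gtest's value-parameterized tests: ::testing::Combine crosses the two axes so that every (ParameterType, variant) pair runs as a separate test, guarded by GTEST_HAS_COMBINE. A minimal standalone sketch of the same pattern (PairTest and its values are hypothetical):

    #include "testing/gtest/include/gtest/gtest.h"

    class PairTest
        : public ::testing::TestWithParam<::testing::tuple<int, int>> {};

    TEST_P(PairTest, SumIsNonNegative) {
      int a = ::testing::get<0>(GetParam());
      int b = ::testing::get<1>(GetParam());
      EXPECT_GE(a + b, 0);
    }

    INSTANTIATE_TEST_CASE_P(AllPairs, PairTest,
                            ::testing::Combine(::testing::Values(0, 1, 2),
                                               ::testing::Range(0, 5)));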
diff --git a/deps/v8/test/unittests/compiler/schedule-unittest.cc b/deps/v8/test/unittests/compiler/schedule-unittest.cc
index 70fd4d50ad..bc825353c4 100644
--- a/deps/v8/test/unittests/compiler/schedule-unittest.cc
+++ b/deps/v8/test/unittests/compiler/schedule-unittest.cc
@@ -73,8 +73,10 @@ typedef TestWithZone ScheduleTest;
namespace {
+const Operator kCallOperator(IrOpcode::kCall, Operator::kNoProperties,
+ "MockCall", 0, 0, 0, 0, 0, 0);
const Operator kBranchOperator(IrOpcode::kBranch, Operator::kNoProperties,
- "Branch", 0, 0, 0, 0, 0, 0);
+ "MockBranch", 0, 0, 0, 0, 0, 0);
const Operator kDummyOperator(IrOpcode::kParameter, Operator::kNoProperties,
"Dummy", 0, 0, 0, 0, 0, 0);
@@ -135,6 +137,35 @@ TEST_F(ScheduleTest, AddGoto) {
}
+TEST_F(ScheduleTest, AddCall) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+
+ Node* call = Node::New(zone(), 0, &kCallOperator, 0, nullptr, false);
+ BasicBlock* sblock = schedule.NewBasicBlock();
+ BasicBlock* eblock = schedule.NewBasicBlock();
+ schedule.AddCall(start, call, sblock, eblock);
+
+ EXPECT_EQ(start, schedule.block(call));
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(2u, start->SuccessorCount());
+ EXPECT_EQ(sblock, start->SuccessorAt(0));
+ EXPECT_EQ(eblock, start->SuccessorAt(1));
+ EXPECT_THAT(start->successors(), ElementsAre(sblock, eblock));
+
+ EXPECT_EQ(1u, sblock->PredecessorCount());
+ EXPECT_EQ(0u, sblock->SuccessorCount());
+ EXPECT_EQ(start, sblock->PredecessorAt(0));
+ EXPECT_THAT(sblock->predecessors(), ElementsAre(start));
+
+ EXPECT_EQ(1u, eblock->PredecessorCount());
+ EXPECT_EQ(0u, eblock->SuccessorCount());
+ EXPECT_EQ(start, eblock->PredecessorAt(0));
+ EXPECT_THAT(eblock->predecessors(), ElementsAre(start));
+}
+
+
TEST_F(ScheduleTest, AddBranch) {
Schedule schedule(zone());
BasicBlock* start = schedule.start();
diff --git a/deps/v8/test/unittests/compiler/scheduler-unittest.cc b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
index 860d5cd325..eeb5bbc4d6 100644
--- a/deps/v8/test/unittests/compiler/scheduler-unittest.cc
+++ b/deps/v8/test/unittests/compiler/scheduler-unittest.cc
@@ -16,6 +16,9 @@
#include "src/compiler/verifier.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::AnyOf;
namespace v8 {
namespace internal {
@@ -26,31 +29,31 @@ class SchedulerTest : public TestWithZone {
SchedulerTest()
: graph_(zone()), common_(zone()), simplified_(zone()), js_(zone()) {}
- static Schedule* ComputeAndVerifySchedule(int expected, Graph* graph) {
+ Schedule* ComputeAndVerifySchedule(size_t expected) {
if (FLAG_trace_turbo) {
OFStream os(stdout);
- os << AsDOT(*graph);
+ os << AsDOT(*graph());
}
- Schedule* schedule = Scheduler::ComputeSchedule(graph->zone(), graph,
- Scheduler::kSplitNodes);
+ Schedule* schedule =
+ Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kSplitNodes);
if (FLAG_trace_turbo_scheduler) {
OFStream os(stdout);
os << *schedule << std::endl;
}
ScheduleVerifier::Run(schedule);
- CHECK_EQ(expected, GetScheduledNodeCount(schedule));
+ EXPECT_EQ(expected, GetScheduledNodeCount(schedule));
return schedule;
}
- static int GetScheduledNodeCount(const Schedule* schedule) {
+ size_t GetScheduledNodeCount(const Schedule* schedule) {
size_t node_count = 0;
for (auto block : *schedule->rpo_order()) {
node_count += block->NodeCount();
if (block->control() != BasicBlock::kNone) ++node_count;
}
- return static_cast<int>(node_count);
+ return node_count;
}
Graph* graph() { return &graph_; }
@@ -71,8 +74,8 @@ class SchedulerRPOTest : public SchedulerTest {
SchedulerRPOTest() {}
// TODO(titzer): pull RPO tests out to their own file.
- static void CheckRPONumbers(BasicBlockVector* order, size_t expected,
- bool loops_allowed) {
+ void CheckRPONumbers(BasicBlockVector* order, size_t expected,
+ bool loops_allowed) {
CHECK(expected == order->size());
for (int i = 0; i < static_cast<int>(order->size()); i++) {
CHECK(order->at(i)->rpo_number() == i);
@@ -83,8 +86,7 @@ class SchedulerRPOTest : public SchedulerTest {
}
}
- static void CheckLoop(BasicBlockVector* order, BasicBlock** blocks,
- int body_size) {
+ void CheckLoop(BasicBlockVector* order, BasicBlock** blocks, int body_size) {
BasicBlock* header = blocks[0];
BasicBlock* end = header->loop_end();
CHECK(end);
@@ -110,11 +112,9 @@ class SchedulerRPOTest : public SchedulerTest {
BasicBlock* header() { return nodes[0]; }
BasicBlock* last() { return nodes[count - 1]; }
~TestLoop() { delete[] nodes; }
-
- void Check(BasicBlockVector* order) { CheckLoop(order, nodes, count); }
};
- static TestLoop* CreateLoop(Schedule* schedule, int count) {
+ TestLoop* CreateLoop(Schedule* schedule, int count) {
TestLoop* loop = new TestLoop();
loop->count = count;
loop->nodes = new BasicBlock* [count];
@@ -130,75 +130,27 @@ class SchedulerRPOTest : public SchedulerTest {
};
-class SchedulerTestWithIsolate : public SchedulerTest, public TestWithIsolate {
- public:
- SchedulerTestWithIsolate() {}
-
- Unique<HeapObject> GetUniqueUndefined() {
- Handle<HeapObject> object =
- Handle<HeapObject>(isolate()->heap()->undefined_value(), isolate());
- return Unique<HeapObject>::CreateUninitialized(object);
- }
-};
-
namespace {
+const Operator kHeapConstant(IrOpcode::kHeapConstant, Operator::kPure,
+ "HeapConstant", 0, 0, 0, 1, 0, 0);
const Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0,
0, 1, 0, 0);
+const Operator kMockCall(IrOpcode::kCall, Operator::kNoProperties, "MockCall",
+ 0, 0, 1, 1, 0, 2);
} // namespace
-TEST_F(SchedulerTest, BuildScheduleEmpty) {
- graph()->SetStart(graph()->NewNode(common()->Start(0)));
- graph()->SetEnd(graph()->NewNode(common()->End(), graph()->start()));
- USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
-}
-
-
-TEST_F(SchedulerTest, BuildScheduleOneParameter) {
- graph()->SetStart(graph()->NewNode(common()->Start(0)));
-
- Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
- Node* ret = graph()->NewNode(common()->Return(), p1, graph()->start(),
- graph()->start());
-
- graph()->SetEnd(graph()->NewNode(common()->End(), ret));
-
- USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
-}
-
-
-TEST_F(SchedulerTest, BuildScheduleIfSplit) {
- graph()->SetStart(graph()->NewNode(common()->Start(3)));
-
- Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
- Node* p2 = graph()->NewNode(common()->Parameter(1), graph()->start());
- Node* p3 = graph()->NewNode(common()->Parameter(2), graph()->start());
- Node* p4 = graph()->NewNode(common()->Parameter(3), graph()->start());
- Node* p5 = graph()->NewNode(common()->Parameter(4), graph()->start());
- Node* cmp = graph()->NewNode(js()->LessThanOrEqual(), p1, p2, p3,
- graph()->start(), graph()->start());
- Node* branch = graph()->NewNode(common()->Branch(), cmp, graph()->start());
- Node* true_branch = graph()->NewNode(common()->IfTrue(), branch);
- Node* false_branch = graph()->NewNode(common()->IfFalse(), branch);
-
- Node* ret1 =
- graph()->NewNode(common()->Return(), p4, graph()->start(), true_branch);
- Node* ret2 =
- graph()->NewNode(common()->Return(), p5, graph()->start(), false_branch);
- Node* merge = graph()->NewNode(common()->Merge(2), ret1, ret2);
- graph()->SetEnd(graph()->NewNode(common()->End(), merge));
-
- ComputeAndVerifySchedule(13, graph());
-}
+// -----------------------------------------------------------------------------
+// Special reverse-post-order block ordering.
TEST_F(SchedulerRPOTest, Degenerate1) {
Schedule schedule(zone());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 1, false);
- CHECK_EQ(schedule.start(), order->at(0));
+ EXPECT_EQ(schedule.start(), order->at(0));
}
@@ -208,8 +160,8 @@ TEST_F(SchedulerRPOTest, Degenerate2) {
schedule.AddGoto(schedule.start(), schedule.end());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 2, false);
- CHECK_EQ(schedule.start(), order->at(0));
- CHECK_EQ(schedule.end(), order->at(1));
+ EXPECT_EQ(schedule.start(), order->at(0));
+ EXPECT_EQ(schedule.end(), order->at(1));
}
@@ -230,7 +182,7 @@ TEST_F(SchedulerRPOTest, Line) {
for (size_t i = 0; i < schedule.BasicBlockCount(); i++) {
BasicBlock* block = schedule.GetBlockById(BasicBlock::Id::FromSize(i));
if (block->rpo_number() >= 0 && block->SuccessorCount() == 1) {
- CHECK(block->rpo_number() + 1 == block->SuccessorAt(0)->rpo_number());
+ EXPECT_EQ(block->rpo_number() + 1, block->SuccessorAt(0)->rpo_number());
}
}
}
@@ -265,7 +217,7 @@ TEST_F(SchedulerRPOTest, EndLoop) {
schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 3, true);
- loop1->Check(order);
+ CheckLoop(order, loop1->nodes, loop1->count);
}
@@ -276,7 +228,7 @@ TEST_F(SchedulerRPOTest, EndLoopNested) {
schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 3, true);
- loop1->Check(order);
+ CheckLoop(order, loop1->nodes, loop1->count);
}
@@ -296,10 +248,10 @@ TEST_F(SchedulerRPOTest, Diamond) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, 4, false);
- CHECK_EQ(0, A->rpo_number());
- CHECK((B->rpo_number() == 1 && C->rpo_number() == 2) ||
- (B->rpo_number() == 2 && C->rpo_number() == 1));
- CHECK_EQ(3, D->rpo_number());
+ EXPECT_EQ(0, A->rpo_number());
+ EXPECT_THAT(B->rpo_number(), AnyOf(1, 2));
+ EXPECT_THAT(C->rpo_number(), AnyOf(1, 2));
+ EXPECT_EQ(3, D->rpo_number());
}
@@ -464,11 +416,9 @@ TEST_F(SchedulerRPOTest, LoopFollow1) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
- static_cast<int>(order->size()));
-
- loop1->Check(order);
- loop2->Check(order);
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
}
@@ -489,10 +439,9 @@ TEST_F(SchedulerRPOTest, LoopFollow2) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
- static_cast<int>(order->size()));
- loop1->Check(order);
- loop2->Check(order);
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
}
@@ -510,10 +459,9 @@ TEST_F(SchedulerRPOTest, LoopFollowN) {
schedule.AddSuccessorForTesting(loop2->nodes[exit], E);
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
- static_cast<int>(order->size()));
- loop1->Check(order);
- loop2->Check(order);
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
}
}
}
@@ -539,10 +487,9 @@ TEST_F(SchedulerRPOTest, NestedLoopFollow1) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
- CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
- static_cast<int>(order->size()));
- loop1->Check(order);
- loop2->Check(order);
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
CheckLoop(order, loop3, 4);
@@ -566,7 +513,7 @@ TEST_F(SchedulerRPOTest, LoopBackedges1) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- loop1->Check(order);
+ CheckLoop(order, loop1->nodes, loop1->count);
}
}
}
@@ -591,7 +538,7 @@ TEST_F(SchedulerRPOTest, LoopOutedges1) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- loop1->Check(order);
+ CheckLoop(order, loop1->nodes, loop1->count);
}
}
}
@@ -616,7 +563,7 @@ TEST_F(SchedulerRPOTest, LoopOutedges2) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- loop1->Check(order);
+ CheckLoop(order, loop1->nodes, loop1->count);
}
}
@@ -640,10 +587,10 @@ TEST_F(SchedulerRPOTest, LoopOutloops1) {
BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
CheckRPONumbers(order, schedule.BasicBlockCount(), true);
- loop1->Check(order);
+ CheckLoop(order, loop1->nodes, loop1->count);
for (int j = 0; j < size; j++) {
- loopN[j]->Check(order);
+ CheckLoop(order, loopN[j]->nodes, loopN[j]->count);
delete loopN[j];
}
delete[] loopN;
@@ -676,7 +623,58 @@ TEST_F(SchedulerRPOTest, LoopMultibackedge) {
}
-TEST_F(SchedulerTestWithIsolate, BuildScheduleIfSplitWithEffects) {
+// -----------------------------------------------------------------------------
+// Graph end-to-end scheduling.
+
+
+TEST_F(SchedulerTest, BuildScheduleEmpty) {
+ graph()->SetStart(graph()->NewNode(common()->Start(0)));
+ graph()->SetEnd(graph()->NewNode(common()->End(), graph()->start()));
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+}
+
+
+TEST_F(SchedulerTest, BuildScheduleOneParameter) {
+ graph()->SetStart(graph()->NewNode(common()->Start(0)));
+
+ Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
+ Node* ret = graph()->NewNode(common()->Return(), p1, graph()->start(),
+ graph()->start());
+
+ graph()->SetEnd(graph()->NewNode(common()->End(), ret));
+
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+}
+
+
+TEST_F(SchedulerTest, BuildScheduleIfSplit) {
+ graph()->SetStart(graph()->NewNode(common()->Start(3)));
+
+ Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
+ Node* p2 = graph()->NewNode(common()->Parameter(1), graph()->start());
+ Node* p3 = graph()->NewNode(common()->Parameter(2), graph()->start());
+ Node* p4 = graph()->NewNode(common()->Parameter(3), graph()->start());
+ Node* p5 = graph()->NewNode(common()->Parameter(4), graph()->start());
+ Node* cmp = graph()->NewNode(js()->LessThanOrEqual(), p1, p2, p3,
+ graph()->start(), graph()->start());
+ Node* branch = graph()->NewNode(common()->Branch(), cmp, graph()->start());
+ Node* true_branch = graph()->NewNode(common()->IfTrue(), branch);
+ Node* false_branch = graph()->NewNode(common()->IfFalse(), branch);
+
+ Node* ret1 =
+ graph()->NewNode(common()->Return(), p4, graph()->start(), true_branch);
+ Node* ret2 =
+ graph()->NewNode(common()->Return(), p5, graph()->start(), false_branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), ret1, ret2);
+ graph()->SetEnd(graph()->NewNode(common()->End(), merge));
+
+ ComputeAndVerifySchedule(13);
+}
+
+
+TEST_F(SchedulerTest, BuildScheduleIfSplitWithEffects) {
+ FLAG_turbo_deoptimization = false;
+
const Operator* op;
// Manually transcripted code for:
@@ -720,7 +718,7 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleIfSplitWithEffects) {
Node* n3 = graph()->NewNode(op, n0);
USE(n3);
n11->ReplaceInput(1, n3);
- op = common()->HeapConstant(GetUniqueUndefined());
+ op = &kHeapConstant;
Node* n7 = graph()->NewNode(op);
USE(n7);
n11->ReplaceInput(2, n7);
@@ -808,11 +806,13 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleIfSplitWithEffects) {
graph()->SetStart(n0);
graph()->SetEnd(n23);
- ComputeAndVerifySchedule(20, graph());
+ ComputeAndVerifySchedule(20);
}
-TEST_F(SchedulerTestWithIsolate, BuildScheduleSimpleLoop) {
+TEST_F(SchedulerTest, BuildScheduleSimpleLoop) {
+ FLAG_turbo_deoptimization = false;
+
const Operator* op;
// Manually transcripted code for:
@@ -846,7 +846,7 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleSimpleLoop) {
Node* n16 = graph()->NewNode(op, nil, nil, nil, nil);
USE(n16);
n16->ReplaceInput(0, n8);
- op = common()->HeapConstant(GetUniqueUndefined());
+ op = &kHeapConstant;
Node* n5 = graph()->NewNode(op);
USE(n5);
n16->ReplaceInput(1, n5);
@@ -911,11 +911,13 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleSimpleLoop) {
graph()->SetStart(n0);
graph()->SetEnd(n20);
- ComputeAndVerifySchedule(19, graph());
+ ComputeAndVerifySchedule(19);
}
-TEST_F(SchedulerTestWithIsolate, BuildScheduleComplexLoops) {
+TEST_F(SchedulerTest, BuildScheduleComplexLoops) {
+ FLAG_turbo_deoptimization = false;
+
const Operator* op;
// Manually transcripted code for:
@@ -961,7 +963,7 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleComplexLoops) {
Node* n18 = graph()->NewNode(op, nil, nil, nil, nil);
USE(n18);
n18->ReplaceInput(0, n9);
- op = common()->HeapConstant(GetUniqueUndefined());
+ op = &kHeapConstant;
Node* n6 = graph()->NewNode(op);
USE(n6);
n18->ReplaceInput(1, n6);
@@ -1149,11 +1151,13 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleComplexLoops) {
graph()->SetStart(n0);
graph()->SetEnd(n46);
- ComputeAndVerifySchedule(46, graph());
+ ComputeAndVerifySchedule(46);
}
-TEST_F(SchedulerTestWithIsolate, BuildScheduleBreakAndContinue) {
+TEST_F(SchedulerTest, BuildScheduleBreakAndContinue) {
+ FLAG_turbo_deoptimization = false;
+
const Operator* op;
// Manually transcribed code for:
@@ -1201,7 +1205,7 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleBreakAndContinue) {
Node* n20 = graph()->NewNode(op, nil, nil, nil, nil);
USE(n20);
n20->ReplaceInput(0, n10);
- op = common()->HeapConstant(GetUniqueUndefined());
+ op = &kHeapConstant;
Node* n6 = graph()->NewNode(op);
USE(n6);
n20->ReplaceInput(1, n6);
@@ -1469,11 +1473,13 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleBreakAndContinue) {
graph()->SetStart(n0);
graph()->SetEnd(n58);
- ComputeAndVerifySchedule(62, graph());
+ ComputeAndVerifySchedule(62);
}
-TEST_F(SchedulerTestWithIsolate, BuildScheduleSimpleLoopWithCodeMotion) {
+TEST_F(SchedulerTest, BuildScheduleSimpleLoopWithCodeMotion) {
+ FLAG_turbo_deoptimization = false;
+
const Operator* op;
// Manually transcribed code for:
@@ -1533,7 +1539,7 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleSimpleLoopWithCodeMotion) {
USE(n14);
n14->ReplaceInput(0, n9);
n14->ReplaceInput(1, n10);
- op = common()->HeapConstant(GetUniqueUndefined());
+ op = &kHeapConstant;
Node* n6 = graph()->NewNode(op);
USE(n6);
n14->ReplaceInput(2, n6);
@@ -1583,10 +1589,7 @@ TEST_F(SchedulerTestWithIsolate, BuildScheduleSimpleLoopWithCodeMotion) {
graph()->SetStart(n0);
graph()->SetEnd(n22);
- Schedule* schedule = ComputeAndVerifySchedule(19, graph());
- // Make sure the integer-only add gets hoisted to a different block that the
- // JSAdd.
- CHECK(schedule->block(n19) != schedule->block(n20));
+ ComputeAndVerifySchedule(19);
}
@@ -1617,7 +1620,7 @@ TARGET_TEST_F(SchedulerTest, FloatingDiamond1) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(13, graph());
+ ComputeAndVerifySchedule(13);
}
@@ -1635,7 +1638,7 @@ TARGET_TEST_F(SchedulerTest, FloatingDiamond2) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(24, graph());
+ ComputeAndVerifySchedule(24);
}
@@ -1654,7 +1657,7 @@ TARGET_TEST_F(SchedulerTest, FloatingDiamond3) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(33, graph());
+ ComputeAndVerifySchedule(33);
}
@@ -1691,7 +1694,7 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamonds) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(23, graph());
+ ComputeAndVerifySchedule(23);
}
@@ -1735,7 +1738,7 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithChain) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(36, graph());
+ ComputeAndVerifySchedule(36);
}
@@ -1769,7 +1772,7 @@ TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithLoop) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(20, graph());
+ ComputeAndVerifySchedule(20);
}
@@ -1802,7 +1805,7 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond1) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(20, graph());
+ ComputeAndVerifySchedule(20);
}
@@ -1836,7 +1839,7 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond2) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(20, graph());
+ ComputeAndVerifySchedule(20);
}
@@ -1882,7 +1885,7 @@ TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond3) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(28, graph());
+ ComputeAndVerifySchedule(28);
}
@@ -1916,7 +1919,7 @@ TARGET_TEST_F(SchedulerTest, PhisPushedDownToDifferentBranches) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(24, graph());
+ ComputeAndVerifySchedule(24);
}
@@ -1937,10 +1940,10 @@ TARGET_TEST_F(SchedulerTest, BranchHintTrue) {
graph()->SetEnd(end);
- Schedule* schedule = ComputeAndVerifySchedule(13, graph());
+ Schedule* schedule = ComputeAndVerifySchedule(13);
// Make sure the false block is marked as deferred.
- CHECK(!schedule->block(t)->deferred());
- CHECK(schedule->block(f)->deferred());
+ EXPECT_FALSE(schedule->block(t)->deferred());
+ EXPECT_TRUE(schedule->block(f)->deferred());
}
@@ -1961,10 +1964,38 @@ TARGET_TEST_F(SchedulerTest, BranchHintFalse) {
graph()->SetEnd(end);
- Schedule* schedule = ComputeAndVerifySchedule(13, graph());
+ Schedule* schedule = ComputeAndVerifySchedule(13);
// Make sure the true block is marked as deferred.
- CHECK(schedule->block(t)->deferred());
- CHECK(!schedule->block(f)->deferred());
+ EXPECT_TRUE(schedule->block(t)->deferred());
+ EXPECT_FALSE(schedule->block(f)->deferred());
+}
+
+
+TARGET_TEST_F(SchedulerTest, CallException) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* c1 = graph()->NewNode(&kMockCall, start);
+ Node* ok1 = graph()->NewNode(common()->IfSuccess(), c1);
+ Node* ex1 = graph()->NewNode(common()->IfException(), c1);
+ Node* c2 = graph()->NewNode(&kMockCall, ok1);
+ Node* ok2 = graph()->NewNode(common()->IfSuccess(), c2);
+ Node* ex2 = graph()->NewNode(common()->IfException(), c2);
+ Node* hdl = graph()->NewNode(common()->Merge(2), ex1, ex2);
+ Node* m = graph()->NewNode(common()->Merge(2), ok2, hdl);
+ Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), c2, p0, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
+ Node* end = graph()->NewNode(common()->End(), ret);
+
+ graph()->SetEnd(end);
+
+ Schedule* schedule = ComputeAndVerifySchedule(17);
+ // Make sure the exception blocks as well as the handler are deferred.
+ EXPECT_TRUE(schedule->block(ex1)->deferred());
+ EXPECT_TRUE(schedule->block(ex2)->deferred());
+ EXPECT_TRUE(schedule->block(hdl)->deferred());
+ EXPECT_FALSE(schedule->block(m)->deferred());
}
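
The new CallException test pins down how the deferred flag propagates: blocks reached only via IfException projections are deferred, and a merge stays deferred only when every predecessor is. A hedged sketch of that rule as applied to a merge block (illustrative, not the scheduler's actual code):

    // A merge is deferred iff all of its predecessors are deferred:
    // hdl merges ex1/ex2 (both deferred) and stays deferred, while
    // m merges ok2 with hdl and therefore does not.
    bool MergeIsDeferred(const std::vector<BasicBlock*>& preds) {
      if (preds.empty()) return false;
      for (BasicBlock* pred : preds) {
        if (!pred->deferred()) return false;
      }
      return true;
    }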
@@ -1987,7 +2018,7 @@ TARGET_TEST_F(SchedulerTest, Switch) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(16, graph());
+ ComputeAndVerifySchedule(16);
}
@@ -2010,7 +2041,7 @@ TARGET_TEST_F(SchedulerTest, FloatingSwitch) {
graph()->SetEnd(end);
- ComputeAndVerifySchedule(16, graph());
+ ComputeAndVerifySchedule(16);
}
} // namespace compiler
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 38924123df..f8f9561af8 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -119,39 +119,6 @@ const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
// -----------------------------------------------------------------------------
-// AnyToBoolean
-
-
-TEST_F(SimplifiedOperatorReducerTest, AnyToBooleanWithBoolean) {
- Node* p = Parameter(Type::Boolean());
- Reduction r = Reduce(graph()->NewNode(simplified()->AnyToBoolean(), p));
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(p, r.replacement());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, AnyToBooleanWithOrderedNumber) {
- Node* p = Parameter(Type::OrderedNumber());
- Reduction r = Reduce(graph()->NewNode(simplified()->AnyToBoolean(), p));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsBooleanNot(IsNumberEqual(p, IsNumberConstant(0))));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, AnyToBooleanWithString) {
- Node* p = Parameter(Type::String());
- Reduction r = Reduce(graph()->NewNode(simplified()->AnyToBoolean(), p));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsBooleanNot(
- IsNumberEqual(IsLoadField(AccessBuilder::ForStringLength(), p,
- graph()->start(), graph()->start()),
- IsNumberConstant(0))));
-}
-
-
-// -----------------------------------------------------------------------------
// BooleanNot
diff --git a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
index 680793023f..a5dad5a415 100644
--- a/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
@@ -38,7 +38,6 @@ const PureOperator kPureOperators[] = {
&SimplifiedOperatorBuilder::Name, IrOpcode::k##Name, \
Operator::kPure | properties, input_count \
}
- PURE(AnyToBoolean, Operator::kNoProperties, 1),
PURE(BooleanNot, Operator::kNoProperties, 1),
PURE(BooleanToNumber, Operator::kNoProperties, 1),
PURE(NumberEqual, Operator::kCommutative, 2),
diff --git a/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
new file mode 100644
index 0000000000..e6f4701598
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/state-values-utils-unittest.cc
@@ -0,0 +1,149 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/state-values-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class StateValuesIteratorTest : public GraphTest {
+ public:
+ StateValuesIteratorTest() : GraphTest(3) {}
+
+ Node* StateValuesFromVector(NodeVector* nodes) {
+ int count = static_cast<int>(nodes->size());
+ return graph()->NewNode(common()->StateValues(count), count,
+ count == 0 ? nullptr : &(nodes->front()));
+ }
+};
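
Note the guard in StateValuesFromVector: calling front() on an empty NodeVector is undefined behavior, so the zero-input case must pass count == 0 with a null input pointer; the EmptyIteration test below depends on that shape.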
+
+
+TEST_F(StateValuesIteratorTest, SimpleIteration) {
+ NodeVector inputs(zone());
+ const int count = 10;
+ for (int i = 0; i < count; i++) {
+ inputs.push_back(Int32Constant(i));
+ }
+ Node* state_values = StateValuesFromVector(&inputs);
+ int i = 0;
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(state_values)) {
+ EXPECT_THAT(node.node, IsInt32Constant(i));
+ i++;
+ }
+ EXPECT_EQ(count, i);
+}
+
+
+TEST_F(StateValuesIteratorTest, EmptyIteration) {
+ NodeVector inputs(zone());
+ Node* state_values = StateValuesFromVector(&inputs);
+ for (auto node : StateValuesAccess(state_values)) {
+ USE(node);
+ FAIL();
+ }
+}
+
+
+TEST_F(StateValuesIteratorTest, NestedIteration) {
+ NodeVector inputs(zone());
+ int count = 0;
+ for (int i = 0; i < 8; i++) {
+ if (i == 2) {
+      // Singly nested at index 2.
+ NodeVector nested_inputs(zone());
+ for (int j = 0; j < 8; j++) {
+ nested_inputs.push_back(Int32Constant(count++));
+ }
+ inputs.push_back(StateValuesFromVector(&nested_inputs));
+ } else if (i == 5) {
+      // Doubly nested at index 5.
+ NodeVector nested_inputs(zone());
+ for (int j = 0; j < 8; j++) {
+ if (j == 7) {
+ NodeVector doubly_nested_inputs(zone());
+ for (int k = 0; k < 2; k++) {
+ doubly_nested_inputs.push_back(Int32Constant(count++));
+ }
+ nested_inputs.push_back(StateValuesFromVector(&doubly_nested_inputs));
+ } else {
+ nested_inputs.push_back(Int32Constant(count++));
+ }
+ }
+ inputs.push_back(StateValuesFromVector(&nested_inputs));
+ } else {
+ inputs.push_back(Int32Constant(count++));
+ }
+ }
+ Node* state_values = StateValuesFromVector(&inputs);
+ int i = 0;
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(state_values)) {
+ EXPECT_THAT(node.node, IsInt32Constant(i));
+ i++;
+ }
+ EXPECT_EQ(count, i);
+}
+
+
+TEST_F(StateValuesIteratorTest, TreeFromVector) {
+ int sizes[] = {0, 1, 2, 100, 5000, 30000};
+ TRACED_FOREACH(int, count, sizes) {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, &machine);
+
+ // Generate the input vector.
+ NodeVector inputs(zone());
+ for (int i = 0; i < count; i++) {
+ inputs.push_back(Int32Constant(i));
+ }
+
+ // Build the tree.
+ StateValuesCache builder(&jsgraph);
+ Node* values_node = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+
+    // Check the tree contents against the vector.
+ int i = 0;
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(values_node)) {
+ EXPECT_THAT(node.node, IsInt32Constant(i));
+ i++;
+ }
+ EXPECT_EQ(inputs.size(), static_cast<size_t>(i));
+ }
+}
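
The larger sizes (5000 and 30000) are the interesting ones: a flat StateValues node would need that many inputs, so these counts presumably force GetNodeForValues to build interior StateValues nodes, exactly the nesting the flattening iterator has to hide.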
+
+
+TEST_F(StateValuesIteratorTest, BuildTreeIdentical) {
+ int sizes[] = {0, 1, 2, 100, 5000, 30000};
+ TRACED_FOREACH(int, count, sizes) {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, &machine);
+
+ // Generate the input vector.
+ NodeVector inputs(zone());
+ for (int i = 0; i < count; i++) {
+ inputs.push_back(Int32Constant(i));
+ }
+
+ // Build two trees from the same data.
+ StateValuesCache builder(&jsgraph);
+ Node* node1 = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+ Node* node2 = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+
+ // The trees should be equal since the data was the same.
+ EXPECT_EQ(node1, node2);
+ }
+}
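
BuildTreeIdentical asserts pointer equality, which can only hold if GetNodeForValues memoizes subtrees for identical input runs. A hedged sketch of the caching this implies, with the key and map types invented for illustration:

    // Return a cached tree when the same run of values was seen before.
    Node* GetNodeForValuesSketch(StateValuesCacheMap* cache,
                                 Node** values, size_t count) {
      StateValuesKey key(values, count);
      auto it = cache->find(key);
      if (it != cache->end()) return it->second;  // node1 == node2 path
      Node* fresh = BuildTree(values, count);     // assumed helper
      (*cache)[key] = fresh;
      return fresh;
    }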
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 1f374c0f85..a83deafe7d 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -1030,6 +1030,25 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
// Miscellaneous.
+TEST_F(InstructionSelectorTest, Uint64LessThanWithLoadAndLoadStackPointer) {
+ StreamBuilder m(this, kMachBool);
+ Node* const sl = m.Load(
+ kMachPtr,
+ m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
+ Node* const sp = m.LoadStackPointer();
+ Node* const n = m.Uint64LessThan(sl, sp);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64StackCheck, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kUnsignedGreaterThan, s[0]->flags_condition());
+}
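
This test documents a peephole in the x64 selector: a load of the stack limit from its external reference compared against the stack pointer collapses into kX64StackCheck with no explicit inputs, since both operands are implicit in the instruction. As a pseudo-pattern (a reading of the test, not selector source):

    // Matched:  Uint64LessThan(Load(kMachPtr, address_of_stack_limit), sp)
    // Emitted:  kX64StackCheck with zero inputs, kFlags_set
    // Flags:    kUnsignedGreaterThan, because limit < sp reads as sp > limit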
+
+
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
StreamBuilder m(this, kMachInt64, kMachInt32);
@@ -1127,6 +1146,21 @@ TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, kMachUint32, kMachUint32);
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lzcnt32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
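
Word32Clz lowers to kX64Lzcnt32, a single LZCNT. The semantics the opcode provides can be sketched in portable C++ (illustrative only; the zero case returning 32 matches LZCNT, unlike BSR):

    #include <cstdint>

    // Count leading zeros of a 32-bit value, with clz(0) defined as 32.
    uint32_t Word32ClzSketch(uint32_t x) {
      if (x == 0) return 32;
      uint32_t n = 0;
      while ((x & 0x80000000u) == 0) {  // shift until the top bit is set
        x <<= 1;
        ++n;
      }
      return n;
    }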
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
index 2076e604fa..ed33259775 100644
--- a/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -168,7 +168,7 @@ TEST_F(GCIdleTimeHandlerTest, DoScavengeHighScavengeSpeed) {
TEST_F(GCIdleTimeHandlerTest, ShouldDoMarkCompact) {
- size_t idle_time_in_ms = 16;
+ size_t idle_time_in_ms = GCIdleTimeHandler::kMaxScheduledIdleTime;
EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_in_ms, 0, 0));
}
@@ -440,5 +440,55 @@ TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeDoNothingButStartIdleRound) {
EXPECT_EQ(DO_NOTHING, action.type);
}
+
+TEST_F(GCIdleTimeHandlerTest, KeepDoingDoNothingWithZeroIdleTime) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ for (int i = 0; i < GCIdleTimeHandler::kMaxNoProgressIdleTimesPerIdleRound;
+ i++) {
+ GCIdleTimeAction action = handler()->Compute(0, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+ }
+  // Should still return DO_NOTHING when given a zero deadline, even past the limit.
+ GCIdleTimeAction action = handler()->Compute(0, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DoneIfNotMakingProgressOnSweeping) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+
+ // Simulate sweeping being in-progress but not complete.
+ heap_state.incremental_marking_stopped = true;
+ heap_state.can_start_incremental_marking = false;
+ heap_state.sweeping_in_progress = true;
+ double idle_time_ms = 10.0;
+ for (int i = 0; i < GCIdleTimeHandler::kMaxNoProgressIdleTimesPerIdleRound;
+ i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+ }
+ // We should return DONE after not making progress for some time.
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DoneIfNotMakingProgressOnIncrementalMarking) {
+ GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+
+ // Simulate incremental marking stopped and not eligible to start.
+ heap_state.incremental_marking_stopped = true;
+ heap_state.can_start_incremental_marking = false;
+ double idle_time_ms = 10.0;
+ for (int i = 0; i < GCIdleTimeHandler::kMaxNoProgressIdleTimesPerIdleRound;
+ i++) {
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+ }
+ // We should return DONE after not making progress for some time.
+ GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+ EXPECT_EQ(DONE, action.type);
+}
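
Both DONE tests exercise the same guard: while the heap state admits no useful work, the handler answers DO_NOTHING up to kMaxNoProgressIdleTimesPerIdleRound times and then gives up with DONE, while zero-deadline calls never count toward the limit (per KeepDoingDoNothingWithZeroIdleTime). A hedged sketch of that counter logic, with field and helper names assumed:

    GCIdleTimeAction GCIdleTimeHandler::ComputeSketch(double idle_ms,
                                                      const HeapState& state) {
      if (idle_ms == 0) return GCIdleTimeAction::Nothing();  // never counts
      if (!CanMakeProgress(state)) {
        if (no_progress_count_ >= kMaxNoProgressIdleTimesPerIdleRound) {
          return GCIdleTimeAction::Done();     // the DONE the tests expect
        }
        no_progress_count_++;
        return GCIdleTimeAction::Nothing();
      }
      no_progress_count_ = 0;
      // ... otherwise pick scavenge / marking / mark-compact work ...
    }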
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index a12d5e7f4b..2ed05b8adc 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -57,6 +57,7 @@
'compiler/js-intrinsic-lowering-unittest.cc',
'compiler/js-operator-unittest.cc',
'compiler/js-typed-lowering-unittest.cc',
+ 'compiler/liveness-analyzer-unittest.cc',
'compiler/load-elimination-unittest.cc',
'compiler/loop-peeling-unittest.cc',
'compiler/machine-operator-reducer-unittest.cc',
@@ -74,6 +75,7 @@
'compiler/scheduler-unittest.cc',
'compiler/simplified-operator-reducer-unittest.cc',
'compiler/simplified-operator-unittest.cc',
+ 'compiler/state-values-utils-unittest.cc',
'compiler/typer-unittest.cc',
'compiler/value-numbering-reducer-unittest.cc',
'compiler/zone-pool-unittest.cc',
diff --git a/deps/v8/test/webkit/fast/js/kde/prototype_length-expected.txt b/deps/v8/test/webkit/fast/js/kde/prototype_length-expected.txt
index 7c4c2e25d8..5cf2bd5db8 100644
--- a/deps/v8/test/webkit/fast/js/kde/prototype_length-expected.txt
+++ b/deps/v8/test/webkit/fast/js/kde/prototype_length-expected.txt
@@ -39,7 +39,7 @@ PASS Array.prototype.length is 6
PASS Function.prototype.length is 0
PASS String.prototype.length is 0
PASS delete Array.prototype.length is false
-PASS delete Function.prototype.length is false
+PASS delete Function.prototype.length is true
PASS delete String.prototype.length is false
PASS foundArrayPrototypeLength is false
PASS foundFunctionPrototypeLength is false
diff --git a/deps/v8/test/webkit/fast/js/kde/prototype_length.js b/deps/v8/test/webkit/fast/js/kde/prototype_length.js
index 4eb888c3da..48357005d1 100644
--- a/deps/v8/test/webkit/fast/js/kde/prototype_length.js
+++ b/deps/v8/test/webkit/fast/js/kde/prototype_length.js
@@ -43,7 +43,7 @@ shouldBe("String.prototype.length","0");
// check DontDelete
shouldBe("delete Array.prototype.length","false");
-shouldBe("delete Function.prototype.length","false");
+shouldBe("delete Function.prototype.length","true");
shouldBe("delete String.prototype.length","false");
// check DontEnum
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index fb8d77d8d2..d8c6864105 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -56,10 +56,10 @@
['arch == arm64 and simulator_run == True', {
'dfg-int-overflow-in-loop': [SKIP],
}], # 'arch == arm64 and simulator_run == True'
-['dcheck_always_on == True and arch == arm64', {
- # Doesn't work with gcc 4.6 on arm64 for some reason.
+['dcheck_always_on == True and (arch == arm or arch == arm64)', {
+ # Doesn't work with gcc 4.6 on arm or arm64 for some reason.
'reentrant-caching': [SKIP],
-}], # 'dcheck_always_on == True and arch == arm64'
+}], # 'dcheck_always_on == True and (arch == arm or arch == arm64)'
##############################################################################
diff --git a/deps/v8/testing/commit_queue/OWNERS b/deps/v8/testing/commit_queue/OWNERS
new file mode 100644
index 0000000000..1d89078df7
--- /dev/null
+++ b/deps/v8/testing/commit_queue/OWNERS
@@ -0,0 +1 @@
+sergiyb@chromium.org
diff --git a/deps/v8/testing/commit_queue/config.json b/deps/v8/testing/commit_queue/config.json
new file mode 100644
index 0000000000..aeb3803ac0
--- /dev/null
+++ b/deps/v8/testing/commit_queue/config.json
@@ -0,0 +1,74 @@
+{
+ "commit_burst_delay": 60,
+ "commit_user": "commit-bot@chromium.org",
+ "committer_project": "v8",
+ "cq_status_url": "https://chromium-cq-status.appspot.com",
+ "git_repo_url": "https://chromium.googlesource.com/v8/v8",
+ "hide_ref_in_committed_msg": true,
+ "max_commit_burst": 1,
+ "project_bases_legacy_from_git_repo_url": true,
+ "remote_branch": "refs/pending/heads/master",
+ "rietveld_url": "https://codereview.chromium.org",
+ "rietveld_user": "commit-bot@chromium.org",
+ "skip_throttle_users": [
+ "commit-bot@chromium.org"
+ ],
+ "tree_status_url": "https://v8-status.appspot.com",
+ "verifiers_no_patch": {
+ "experimental_try_job_verifier": {},
+ "reviewer_lgtm_verifier": {},
+ "tree_status_verifier": {},
+ "try_job_verifier": {
+ "launched": {
+ "tryserver.v8": {
+ "v8_android_arm_compile_rel": [
+ "defaulttests"
+ ],
+ "v8_linux64_asan_rel": [
+ "defaulttests"
+ ],
+ "v8_linux64_rel": [
+ "defaulttests"
+ ],
+ "v8_linux_arm64_rel": [
+ "defaulttests"
+ ],
+ "v8_linux_arm_rel": [
+ "defaulttests"
+ ],
+ "v8_linux_chromium_gn_rel": [
+ "defaulttests"
+ ],
+ "v8_linux_dbg": [
+ "defaulttests"
+ ],
+ "v8_linux_gcc_compile_rel": [
+ "defaulttests"
+ ],
+ "v8_linux_nodcheck_rel": [
+ "defaulttests"
+ ],
+ "v8_linux_rel": [
+ "defaulttests"
+ ],
+ "v8_mac_rel": [
+ "defaulttests"
+ ],
+ "v8_presubmit": [
+ "defaulttests"
+ ],
+ "v8_win64_rel": [
+ "defaulttests"
+ ],
+ "v8_win_compile_dbg": [
+ "defaulttests"
+ ],
+ "v8_win_rel": [
+ "defaulttests"
+ ]
+ }
+ },
+ "triggered": {}
+ }
+ }
+}
diff --git a/deps/v8/tools/check-name-clashes.py b/deps/v8/tools/check-name-clashes.py
index 89a7dee7a1..fcc70a5281 100755
--- a/deps/v8/tools/check-name-clashes.py
+++ b/deps/v8/tools/check-name-clashes.py
@@ -11,7 +11,6 @@ import sys
FILENAME = "src/runtime/runtime.h"
LISTHEAD = re.compile(r"#define\s+(\w+LIST\w*)\((\w+)\)")
LISTBODY = re.compile(r".*\\$")
-BLACKLIST = ['INLINE_FUNCTION_LIST']
class Function(object):
@@ -32,7 +31,7 @@ def FindLists(filename):
for line in f:
if mode == "SEARCHING":
match = LISTHEAD.match(line)
- if match and match.group(1) not in BLACKLIST:
+ if match:
mode = "APPENDING"
current_list.append(line)
else:
diff --git a/deps/v8/tools/cpu.sh b/deps/v8/tools/cpu.sh
index 8e8a243c60..0597d09ea9 100755
--- a/deps/v8/tools/cpu.sh
+++ b/deps/v8/tools/cpu.sh
@@ -31,7 +31,7 @@ single_core() {
all_cores() {
echo "Reactivating all CPU cores"
- for (( i=2; i<=$MAXID; i++ )); do
+ for (( i=1; i<=$MAXID; i++ )); do
echo 1 > $CPUPATH/cpu$i/online
done
}
diff --git a/deps/v8/tools/external-reference-check.py b/deps/v8/tools/external-reference-check.py
index 386d4a9ee5..bced8d478d 100644
--- a/deps/v8/tools/external-reference-check.py
+++ b/deps/v8/tools/external-reference-check.py
@@ -8,7 +8,7 @@ import os
import sys
DECLARE_FILE = "src/assembler.h"
-REGISTER_FILE = "src/serialize.cc"
+REGISTER_FILE = "src/snapshot/serialize.cc"
DECLARE_RE = re.compile("\s*static ExternalReference ([^(]+)\(")
REGISTER_RE = re.compile("\s*Add\(ExternalReference::([^(]+)\(")
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index 7f08ee2f86..effec7b357 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -209,7 +209,7 @@
'sources': [
'<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
- '../../src/snapshot-empty.cc',
+ '../../src/snapshot/snapshot-empty.cc',
],
'conditions': [
['want_separate_host_toolset==1', {
@@ -267,8 +267,8 @@
'../..',
],
'sources': [
- '../../src/natives-external.cc',
- '../../src/snapshot-external.cc',
+ '../../src/snapshot/natives-external.cc',
+ '../../src/snapshot/snapshot-external.cc',
],
'actions': [
{
@@ -442,9 +442,7 @@
'../../src/compiler/frame.h',
'../../src/compiler/gap-resolver.cc',
'../../src/compiler/gap-resolver.h',
- '../../src/compiler/generic-algorithm.h',
'../../src/compiler/graph-builder.h',
- '../../src/compiler/graph-inl.h',
'../../src/compiler/graph-reducer.cc',
'../../src/compiler/graph-reducer.h',
'../../src/compiler/graph-replay.cc',
@@ -473,6 +471,8 @@
'../../src/compiler/js-intrinsic-lowering.h',
'../../src/compiler/js-operator.cc',
'../../src/compiler/js-operator.h',
+ '../../src/compiler/js-type-feedback.cc',
+ '../../src/compiler/js-type-feedback.h',
'../../src/compiler/js-typed-lowering.cc',
'../../src/compiler/js-typed-lowering.h',
'../../src/compiler/jump-threading.cc',
@@ -480,6 +480,8 @@
'../../src/compiler/linkage-impl.h',
'../../src/compiler/linkage.cc',
'../../src/compiler/linkage.h',
+ '../../src/compiler/liveness-analyzer.cc',
+ '../../src/compiler/liveness-analyzer.h',
'../../src/compiler/load-elimination.cc',
'../../src/compiler/load-elimination.h',
'../../src/compiler/loop-analysis.cc',
@@ -499,6 +501,7 @@
'../../src/compiler/node-cache.h',
'../../src/compiler/node-marker.cc',
'../../src/compiler/node-marker.h',
+ '../../src/compiler/node-matchers.cc',
'../../src/compiler/node-matchers.h',
'../../src/compiler/node-properties.cc',
'../../src/compiler/node-properties.h',
@@ -539,6 +542,8 @@
'../../src/compiler/simplified-operator.h',
'../../src/compiler/source-position.cc',
'../../src/compiler/source-position.h',
+ '../../src/compiler/state-values-utils.cc',
+ '../../src/compiler/state-values-utils.h',
'../../src/compiler/typer.cc',
'../../src/compiler/typer.h',
'../../src/compiler/value-numbering-reducer.cc',
@@ -756,7 +761,6 @@
'../../src/modules.cc',
'../../src/modules.h',
'../../src/msan.h',
- '../../src/natives.h',
'../../src/objects-debug.cc',
'../../src/objects-inl.h',
'../../src/objects-printer.cc',
@@ -768,6 +772,8 @@
'../../src/ostreams.h',
'../../src/parser.cc',
'../../src/parser.h',
+ '../../src/pending-compilation-error-handler.cc',
+ '../../src/pending-compilation-error-handler.h',
'../../src/perf-jit.cc',
'../../src/perf-jit.h',
'../../src/preparse-data-format.h',
@@ -837,20 +843,23 @@
'../../src/scopeinfo.h',
'../../src/scopes.cc',
'../../src/scopes.h',
- '../../src/serialize.cc',
- '../../src/serialize.h',
'../../src/small-pointer-list.h',
'../../src/smart-pointers.h',
- '../../src/snapshot.h',
- '../../src/snapshot-common.cc',
- '../../src/snapshot-source-sink.cc',
- '../../src/snapshot-source-sink.h',
+ '../../src/snapshot/natives.h',
+ '../../src/snapshot/serialize.cc',
+ '../../src/snapshot/serialize.h',
+ '../../src/snapshot/snapshot.h',
+ '../../src/snapshot/snapshot-common.cc',
+ '../../src/snapshot/snapshot-source-sink.cc',
+ '../../src/snapshot/snapshot-source-sink.h',
'../../src/string-builder.cc',
'../../src/string-builder.h',
'../../src/string-search.cc',
'../../src/string-search.h',
'../../src/string-stream.cc',
'../../src/string-stream.h',
+ '../../src/strings-storage.cc',
+ '../../src/strings-storage.h',
'../../src/strtod.cc',
'../../src/strtod.h',
'../../src/ic/stub-cache.cc',
@@ -1363,6 +1372,7 @@
['nacl_target_arch=="none"', {
'link_settings': {
'libraries': [
+ '-ldl',
'-lrt'
],
},
@@ -1382,6 +1392,11 @@
'sources': [
'../../src/base/platform/platform-posix.cc'
],
+ 'link_settings': {
+ 'libraries': [
+ '-ldl'
+ ]
+ },
'conditions': [
['host_os=="mac"', {
'target_conditions': [
@@ -1675,8 +1690,8 @@
'../../src/array.js',
'../../src/string.js',
'../../src/uri.js',
- '../../src/third_party/fdlibm/fdlibm.js',
'../../src/math.js',
+ '../../src/third_party/fdlibm/fdlibm.js',
'../../src/date.js',
'../../src/regexp.js',
'../../src/arraybuffer.js',
@@ -1694,19 +1709,19 @@
'../../src/debug-debugger.js',
'../../src/mirror-debugger.js',
'../../src/liveedit-debugger.js',
+ '../../src/templates.js',
'../../src/macros.py',
],
'experimental_library_files': [
'../../src/macros.py',
'../../src/proxy.js',
'../../src/generator.js',
- '../../src/harmony-string.js',
'../../src/harmony-array.js',
'../../src/harmony-array-includes.js',
'../../src/harmony-tostring.js',
'../../src/harmony-typedarray.js',
- '../../src/harmony-templates.js',
- '../../src/harmony-regexp.js'
+ '../../src/harmony-regexp.js',
+ '../../src/harmony-reflect.js'
],
'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
@@ -1802,7 +1817,7 @@
'../..',
],
'sources': [
- '../../src/mksnapshot.cc',
+ '../../src/snapshot/mksnapshot.cc',
],
'conditions': [
['v8_enable_i18n_support==1', {
diff --git a/deps/v8/tools/js2c.py b/deps/v8/tools/js2c.py
index 621ed5a21d..327e7d916c 100755
--- a/deps/v8/tools/js2c.py
+++ b/deps/v8/tools/js2c.py
@@ -247,7 +247,7 @@ HEADER_TEMPLATE = """\
// javascript source files or the GYP script.
#include "src/v8.h"
-#include "src/natives.h"
+#include "src/snapshot/natives.h"
#include "src/utils.h"
namespace v8 {
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index 409b396917..f9bea4a61f 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -711,6 +711,23 @@ class LibraryRepo(object):
self.names = set()
self.ticks = {}
+
+ def HasDynamicSymbols(self, filename):
+ if filename.endswith(".ko"): return False
+ process = subprocess.Popen(
+ "%s -h %s" % (OBJDUMP_BIN, filename),
+ shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ pipe = process.stdout
+ try:
+ for line in pipe:
+ match = OBJDUMP_SECTION_HEADER_RE.match(line)
+ if match and match.group(1) == 'dynsym': return True
+ finally:
+ pipe.close()
+ assert process.wait() == 0, "Failed to objdump -h %s" % filename
+ return False
+
+
def Load(self, mmap_info, code_map, options):
# Skip kernel mmaps when requested using the fact that their tid
# is 0.
@@ -730,10 +747,10 @@ class LibraryRepo(object):
# Unfortunately, section headers span two lines, so we have to
# keep the just seen section name (from the first line in each
# section header) in the after_section variable.
- if mmap_info.filename.endswith(".ko"):
- dynamic_symbols = ""
- else:
+ if self.HasDynamicSymbols(mmap_info.filename):
dynamic_symbols = "-T"
+ else:
+ dynamic_symbols = ""
process = subprocess.Popen(
"%s -h -t %s -C %s" % (OBJDUMP_BIN, dynamic_symbols, mmap_info.filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
diff --git a/deps/v8/tools/parser-shell.cc b/deps/v8/tools/parser-shell.cc
index 3e41bf94bb..3fe735986b 100644
--- a/deps/v8/tools/parser-shell.cc
+++ b/deps/v8/tools/parser-shell.cc
@@ -88,14 +88,16 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
i::ScriptData* cached_data_impl = NULL;
// First round of parsing (produce data to cache).
{
- CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
- info.SetCachedData(&cached_data_impl,
- v8::ScriptCompiler::kProduceParserCache);
+ Zone zone;
+ ParseInfo info(&zone, script);
+ info.set_global();
+ info.set_cached_data(&cached_data_impl);
+ info.set_compile_options(v8::ScriptCompiler::kProduceParserCache);
v8::base::ElapsedTimer timer;
timer.Start();
// Allow lazy parsing; otherwise we won't produce cached data.
- bool success = Parser::ParseStatic(&info, true);
+ info.set_allow_lazy_parsing();
+ bool success = Parser::ParseStatic(&info);
parse_time1 = timer.Elapsed();
if (!success) {
fprintf(stderr, "Parsing failed\n");
@@ -104,14 +106,16 @@ std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
}
// Second round of parsing (consume cached data).
{
- CompilationInfoWithZone info(script);
- info.MarkAsGlobal();
- info.SetCachedData(&cached_data_impl,
- v8::ScriptCompiler::kConsumeParserCache);
+ Zone zone;
+ ParseInfo info(&zone, script);
+ info.set_global();
+ info.set_cached_data(&cached_data_impl);
+ info.set_compile_options(v8::ScriptCompiler::kConsumeParserCache);
v8::base::ElapsedTimer timer;
timer.Start();
// Allow lazy parsing; otherwise cached data won't help.
- bool success = Parser::ParseStatic(&info, true);
+ info.set_allow_lazy_parsing();
+ bool success = Parser::ParseStatic(&info);
parse_time2 = timer.Elapsed();
if (!success) {
fprintf(stderr, "Parsing failed\n");
diff --git a/deps/v8/tools/perf-to-html.py b/deps/v8/tools/perf-to-html.py
new file mode 100755
index 0000000000..63faeb1d66
--- /dev/null
+++ b/deps/v8/tools/perf-to-html.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+'''
+python %prog
+
+Convert a perf trybot JSON file into a pleasing HTML page. It can read
+from standard input or via the --filename option. Examples:
+
+ cat results.json | %prog --title "ia32 results"
+ %prog -f results.json -t "ia32 results" -o results.html
+'''
+
+import commands
+import json
+import math
+from optparse import OptionParser
+import os
+import shutil
+import sys
+import tempfile
+
+PERCENT_CONSIDERED_SIGNIFICANT = 0.5
+PROBABILITY_CONSIDERED_SIGNIFICANT = 0.02
+PROBABILITY_CONSIDERED_MEANINGLESS = 0.05
+
+
+def ComputeZ(baseline_avg, baseline_sigma, mean, n):
+ if baseline_sigma == 0:
+    return 1000.0
+ return abs((mean - baseline_avg) / (baseline_sigma / math.sqrt(n)))
+
+
+# Values from http://www.fourmilab.ch/rpkp/experiments/analysis/zCalc.html
+def ComputeProbability(z):
+ if z > 2.575829: # p 0.005: two sided < 0.01
+ return 0
+ if z > 2.326348: # p 0.010
+ return 0.01
+ if z > 2.170091: # p 0.015
+ return 0.02
+ if z > 2.053749: # p 0.020
+ return 0.03
+ if z > 1.959964: # p 0.025: two sided < 0.05
+ return 0.04
+ if z > 1.880793: # p 0.030
+ return 0.05
+ if z > 1.811910: # p 0.035
+ return 0.06
+ if z > 1.750686: # p 0.040
+ return 0.07
+ if z > 1.695397: # p 0.045
+ return 0.08
+ if z > 1.644853: # p 0.050: two sided < 0.10
+ return 0.09
+ if z > 1.281551: # p 0.100: two sided < 0.20
+ return 0.10
+ return 0.20 # two sided p >= 0.20
+
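A worked example of what these two helpers compute (numbers invented for illustration): with baseline_avg = 100, baseline_sigma = 2, mean = 103 and n = 16 runs, z = |103 - 100| / (2 / sqrt(16)) = 6.0. That exceeds 2.575829, so ComputeProbability returns 0, which is below PROBABILITY_CONSIDERED_SIGNIFICANT (0.02), and the result is flagged significant.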
+
+class Result:
+ def __init__(self, test_name, count, hasScoreUnits, result, sigma,
+ master_result, master_sigma):
+ self.result_ = float(result)
+ self.sigma_ = float(sigma)
+ self.master_result_ = float(master_result)
+ self.master_sigma_ = float(master_sigma)
+ self.significant_ = False
+ self.notable_ = 0
+ self.percentage_string_ = ""
+ # compute notability and significance.
+ if hasScoreUnits:
+ compare_num = 100*self.result_/self.master_result_ - 100
+ else:
+ compare_num = 100*self.master_result_/self.result_ - 100
+ if abs(compare_num) > 0.1:
+ self.percentage_string_ = "%3.1f" % (compare_num)
+ z = ComputeZ(self.master_result_, self.master_sigma_, self.result_, count)
+ p = ComputeProbability(z)
+ if p < PROBABILITY_CONSIDERED_SIGNIFICANT:
+ self.significant_ = True
+ if compare_num >= PERCENT_CONSIDERED_SIGNIFICANT:
+ self.notable_ = 1
+ elif compare_num <= -PERCENT_CONSIDERED_SIGNIFICANT:
+ self.notable_ = -1
+
+ def result(self):
+ return self.result_
+
+ def sigma(self):
+ return self.sigma_
+
+ def master_result(self):
+ return self.master_result_
+
+ def master_sigma(self):
+ return self.master_sigma_
+
+ def percentage_string(self):
+    return self.percentage_string_
+
+ def isSignificant(self):
+ return self.significant_
+
+ def isNotablyPositive(self):
+ return self.notable_ > 0
+
+ def isNotablyNegative(self):
+ return self.notable_ < 0
+
+
+class Benchmark:
+ def __init__(self, name, data):
+ self.name_ = name
+ self.tests_ = {}
+ for test in data:
+ # strip off "<name>/" prefix
+ test_name = test.split("/")[1]
+ self.appendResult(test_name, data[test])
+
+ # tests is a dictionary of Results
+ def tests(self):
+ return self.tests_
+
+ def SortedTestKeys(self):
+ keys = self.tests_.keys()
+ keys.sort()
+ t = "Total"
+ if t in keys:
+ keys.remove(t)
+ keys.append(t)
+ return keys
+
+ def name(self):
+ return self.name_
+
+ def appendResult(self, test_name, test_data):
+ with_string = test_data["result with patch "]
+ data = with_string.split()
+ master_string = test_data["result without patch"]
+ master_data = master_string.split()
+ runs = int(test_data["runs"])
+ units = test_data["units"]
+ hasScoreUnits = units == "score"
+ self.tests_[test_name] = Result(test_name,
+ runs,
+ hasScoreUnits,
+ data[0], data[2],
+ master_data[0], master_data[2])
+
+
+class BenchmarkRenderer:
+ def __init__(self, output_file):
+ self.print_output_ = []
+ self.output_file_ = output_file
+
+ def Print(self, str_data):
+ self.print_output_.append(str_data)
+
+ def FlushOutput(self):
+ string_data = "\n".join(self.print_output_)
+    self.print_output_ = []
+ if self.output_file_:
+ # create a file
+ with open(self.output_file_, "w") as text_file:
+ text_file.write(string_data)
+ else:
+ print(string_data)
+
+ def RenderOneBenchmark(self, benchmark):
+ self.Print("<h2>")
+ self.Print("<a name=\"" + benchmark.name() + "\">")
+ self.Print(benchmark.name() + "</a> <a href=\"#top\">(top)</a>")
+    self.Print("</h2>")
+ self.Print("<table class=\"benchmark\">")
+ self.Print("<thead>")
+ self.Print(" <th>Test</th>")
+ self.Print(" <th>Result</th>")
+ self.Print(" <th>Master</th>")
+ self.Print(" <th>%</th>")
+ self.Print("</thead>")
+ self.Print("<tbody>")
+ tests = benchmark.tests()
+ for test in benchmark.SortedTestKeys():
+ t = tests[test]
+ self.Print(" <tr>")
+ self.Print(" <td>" + test + "</td>")
+ self.Print(" <td>" + str(t.result()) + "</td>")
+ self.Print(" <td>" + str(t.master_result()) + "</td>")
+ t = tests[test]
+ res = t.percentage_string()
+ if t.isSignificant():
+ res = self.bold(res)
+ if t.isNotablyPositive():
+ res = self.green(res)
+ elif t.isNotablyNegative():
+ res = self.red(res)
+ self.Print(" <td>" + res + "</td>")
+ self.Print(" </tr>")
+ self.Print("</tbody>")
+ self.Print("</table>")
+
+ def ProcessJSONData(self, data, title):
+ self.Print("<h1>" + title + "</h1>")
+ self.Print("<ul>")
+ for benchmark in data:
+ if benchmark != "errors":
+ self.Print("<li><a href=\"#" + benchmark + "\">" + benchmark + "</a></li>")
+ self.Print("</ul>")
+ for benchmark in data:
+ if benchmark != "errors":
+ benchmark_object = Benchmark(benchmark, data[benchmark])
+ self.RenderOneBenchmark(benchmark_object)
+
+ def bold(self, data):
+ return "<b>" + data + "</b>"
+
+ def red(self, data):
+ return "<font color=\"red\">" + data + "</font>"
+
+
+ def green(self, data):
+ return "<font color=\"green\">" + data + "</font>"
+
+ def PrintHeader(self):
+ data = """<html>
+<head>
+<title>Output</title>
+<style type="text/css">
+/*
+Style inspired by Andy Ferra's gist at https://gist.github.com/andyferra/2554919
+*/
+body {
+ font-family: Helvetica, arial, sans-serif;
+ font-size: 14px;
+ line-height: 1.6;
+ padding-top: 10px;
+ padding-bottom: 10px;
+ background-color: white;
+ padding: 30px;
+}
+h1, h2, h3, h4, h5, h6 {
+ margin: 20px 0 10px;
+ padding: 0;
+ font-weight: bold;
+ -webkit-font-smoothing: antialiased;
+ cursor: text;
+ position: relative;
+}
+h1 {
+ font-size: 28px;
+ color: black;
+}
+
+h2 {
+ font-size: 24px;
+ border-bottom: 1px solid #cccccc;
+ color: black;
+}
+
+h3 {
+ font-size: 18px;
+}
+
+h4 {
+ font-size: 16px;
+}
+
+h5 {
+ font-size: 14px;
+}
+
+h6 {
+ color: #777777;
+ font-size: 14px;
+}
+
+p, blockquote, ul, ol, dl, li, table, pre {
+ margin: 15px 0;
+}
+
+li p.first {
+ display: inline-block;
+}
+
+ul, ol {
+ padding-left: 30px;
+}
+
+ul :first-child, ol :first-child {
+ margin-top: 0;
+}
+
+ul :last-child, ol :last-child {
+ margin-bottom: 0;
+}
+
+table {
+ padding: 0;
+}
+
+table tr {
+ border-top: 1px solid #cccccc;
+ background-color: white;
+ margin: 0;
+ padding: 0;
+}
+
+table tr:nth-child(2n) {
+ background-color: #f8f8f8;
+}
+
+table tr th {
+ font-weight: bold;
+ border: 1px solid #cccccc;
+ text-align: left;
+ margin: 0;
+ padding: 6px 13px;
+}
+table tr td {
+ border: 1px solid #cccccc;
+ text-align: left;
+ margin: 0;
+ padding: 6px 13px;
+}
+table tr th :first-child, table tr td :first-child {
+ margin-top: 0;
+}
+table tr th :last-child, table tr td :last-child {
+ margin-bottom: 0;
+}
+</style>
+</head>
+<body>
+"""
+ self.Print(data)
+
+ def PrintFooter(self):
+ data = """</body>
+</html>
+"""
+ self.Print(data)
+
+
+def Render(opts, args):
+ if opts.filename:
+ with open(opts.filename) as json_data:
+ data = json.load(json_data)
+ else:
+ # load data from stdin
+ data = json.load(sys.stdin)
+
+ if opts.title:
+ title = opts.title
+ elif opts.filename:
+ title = opts.filename
+ else:
+ title = "Benchmark results"
+ renderer = BenchmarkRenderer(opts.output)
+ renderer.PrintHeader()
+ renderer.ProcessJSONData(data, title)
+ renderer.PrintFooter()
+ renderer.FlushOutput()
+
+
+if __name__ == '__main__':
+ parser = OptionParser(usage=__doc__)
+ parser.add_option("-f", "--filename", dest="filename",
+ help="Specifies the filename for the JSON results "
+ "rather than reading from stdin.")
+ parser.add_option("-t", "--title", dest="title",
+ help="Optional title of the web page.")
+ parser.add_option("-o", "--output", dest="output",
+ help="Write html output to this file rather than stdout.")
+
+ (opts, args) = parser.parse_args()
+ Render(opts, args)
diff --git a/deps/v8/tools/release/auto_push.py b/deps/v8/tools/release/auto_push.py
index 121288f5b5..aba5cba72a 100755
--- a/deps/v8/tools/release/auto_push.py
+++ b/deps/v8/tools/release/auto_push.py
@@ -34,15 +34,15 @@ import sys
import urllib
from common_includes import *
-import push_to_candidates
+import create_release
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
- self.InitialEnvironmentChecks(self.default_cwd)
- self.CommonPrepare()
+ # Fetch unfetched revisions.
+ self.vc.Fetch()
class FetchCandidate(Step):
@@ -67,11 +67,11 @@ class LastReleaseBailout(Step):
return True
-class PushToCandidates(Step):
- MESSAGE = "Pushing to candidates if specified."
+class CreateRelease(Step):
+ MESSAGE = "Creating release if specified."
def RunStep(self):
- print "Pushing candidate %s to candidates." % self["candidate"]
+ print "Creating release for %s." % self["candidate"]
args = [
"--author", self._options.author,
@@ -83,16 +83,15 @@ class PushToCandidates(Step):
if self._options.work_dir:
args.extend(["--work-dir", self._options.work_dir])
- # TODO(machenbach): Update the script before calling it.
if self._options.push:
self._side_effect_handler.Call(
- push_to_candidates.PushToCandidates().Run, args)
+ create_release.CreateRelease().Run, args)
class AutoPush(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-p", "--push",
- help="Push to candidates. Dry run if unspecified.",
+ help="Create release. Dry run if unspecified.",
default=False, action="store_true")
def _ProcessOptions(self, options):
@@ -112,7 +111,7 @@ class AutoPush(ScriptsBase):
Preparation,
FetchCandidate,
LastReleaseBailout,
- PushToCandidates,
+ CreateRelease,
]
diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py
index 315a4bc2a0..f7692cf6f9 100755
--- a/deps/v8/tools/release/auto_roll.py
+++ b/deps/v8/tools/release/auto_roll.py
@@ -42,8 +42,11 @@ class DetectLastRoll(Step):
MESSAGE = "Detect commit ID of the last Chromium roll."
def RunStep(self):
- # The revision that should be rolled.
- latest_release = self.GetLatestRelease()
+ # The revision that should be rolled. Check for the latest of the most
+ # recent releases based on commit timestamp.
+ revisions = self.GetRecentReleases(
+ max_age=self._options.max_age * DAY_IN_SECONDS)
+ assert revisions, "Didn't find any recent release."
# Interpret the DEPS file to retrieve the v8 revision.
# TODO(machenbach): This should be part or the roll-deps api of
@@ -53,39 +56,27 @@ class DetectLastRoll(Step):
# The revision rolled last.
self["last_roll"] = vars['v8_revision']
-
- # TODO(machenbach): It is possible that the auto-push script made a new
- # fast-forward release (e.g. 4.2.3) while somebody patches the last
- # candidate (e.g. 4.2.2.1). In this case, the auto-roller would pick
- # the fast-forward release. Should there be a way to prioritize the
- # patched version?
-
- if latest_release == self["last_roll"]:
- # We always try to roll if the latest revision is not the revision in
- # chromium.
+ last_version = self.GetVersionTag(self["last_roll"])
+ assert last_version, "The last rolled v8 revision is not tagged."
+
+    # There must be some progress between the last roll and the new candidate
+    # revision (i.e. we don't go backwards). The revisions are ordered newest
+    # to oldest. The newest release may still show no progress over the last
+    # roll, e.g. when it is a cherry-pick on a release branch; in that case
+    # we look further down the list.
+ for revision in revisions:
+ version = self.GetVersionTag(revision)
+ assert version, "Internal error. All recent releases should have a tag"
+
+ if SortingKey(last_version) < SortingKey(version):
+ self["roll"] = revision
+ break
+ else:
print("There is no newer v8 revision than the one in Chromium (%s)."
% self["last_roll"])
return True
-class CheckClusterFuzz(Step):
- MESSAGE = "Check ClusterFuzz api for new problems."
-
- def RunStep(self):
- if not os.path.exists(self.Config("CLUSTERFUZZ_API_KEY_FILE")):
- print "Skipping ClusterFuzz check. No api key file found."
- return False
- api_key = FileToText(self.Config("CLUSTERFUZZ_API_KEY_FILE"))
- # Check for open, reproducible issues that have no associated bug.
- result = self._side_effect_handler.ReadClusterFuzzAPI(
- api_key, job_type="linux_asan_d8_dbg", reproducible="True",
- open="True", bug_information="",
- revision_greater_or_equal=str(self["last_push"]))
- if result:
- print "Stop due to pending ClusterFuzz issues."
- return True
-
-
class RollChromium(Step):
MESSAGE = "Roll V8 into Chromium."
@@ -97,12 +88,12 @@ class RollChromium(Step):
"--chromium", self._options.chromium,
"--last-roll", self["last_roll"],
"--use-commit-queue",
+ self["roll"],
]
if self._options.sheriff:
- args.extend([
- "--sheriff", "--googlers-mapping", self._options.googlers_mapping])
+ args.append("--sheriff")
if self._options.dry_run:
- args.extend(["--dry-run"])
+ args.append("--dry-run")
if self._options.work_dir:
args.extend(["--work-dir", self._options.work_dir])
self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
@@ -113,6 +104,8 @@ class AutoRoll(ScriptsBase):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
+ parser.add_argument("--max-age", default=3, type=int,
+ help="Maximum age in days of the latest release.")
parser.add_argument("--roll", help="Call Chromium roll script.",
default=False, action="store_true")
@@ -128,14 +121,12 @@ class AutoRoll(ScriptsBase):
def _Config(self):
return {
"PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
- "CLUSTERFUZZ_API_KEY_FILE": ".cf_api_key",
}
def _Steps(self):
return [
CheckActiveRoll,
DetectLastRoll,
- CheckClusterFuzz,
RollChromium,
]
diff --git a/deps/v8/tools/release/chromium_roll.py b/deps/v8/tools/release/chromium_roll.py
index 8a3ff4a0a7..de0a569a90 100755
--- a/deps/v8/tools/release/chromium_roll.py
+++ b/deps/v8/tools/release/chromium_roll.py
@@ -14,36 +14,31 @@ ROLL_SUMMARY = ("Summary of changes available at:\n"
"https://chromium.googlesource.com/v8/v8/+log/%s..%s")
+ISSUE_MSG = (
+"""Please follow these instructions for assigning/CC'ing issues:
+https://code.google.com/p/v8-wiki/wiki/TriagingIssues""")
+
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
# Update v8 remote tracking branches.
self.GitFetchOrigin()
+ self.Git("fetch origin +refs/tags/*:refs/tags/*")
-class DetectLastPush(Step):
- MESSAGE = "Detect commit ID of last release."
+class PrepareRollCandidate(Step):
+ MESSAGE = "Robustness checks of the roll candidate."
def RunStep(self):
- # The revision that should be rolled.
- self["last_push"] = self._options.last_push or self.GetLatestRelease()
- self["push_title"] = self.GitLog(n=1, format="%s",
- git_hash=self["last_push"])
-
- # The master revision this release is based on.
- self["push_base"] = self.GetLatestReleaseBase()
+ self["roll_title"] = self.GitLog(n=1, format="%s",
+ git_hash=self._options.roll)
- # FIXME(machenbach): Manually specifying a revision doesn't work at the
- # moment. Needs more complicated logic to find the correct push_base above.
- # Maybe delete that parameter entirely?
- assert not self._options.last_push
-
- # Determine the master revision of the last roll.
+ # Make sure the last roll and the roll candidate are releases.
+ version = self.GetVersionTag(self._options.roll)
+ assert version, "The revision to roll is not tagged."
version = self.GetVersionTag(self._options.last_roll)
- assert version
- self["last_rolled_base"] = self.GetLatestReleaseBase(version=version)
- assert self["last_rolled_base"]
+ assert version, "The revision used as last roll is not tagged."
class SwitchChromium(Step):
@@ -73,7 +68,7 @@ class UpdateChromiumCheckout(Step):
# Update v8 remotes.
self.GitFetchOrigin()
- self.GitCreateBranch("v8-roll-%s" % self["last_push"],
+ self.GitCreateBranch("v8-roll-%s" % self._options.roll,
cwd=self._options.chromium)
@@ -83,19 +78,18 @@ class UploadCL(Step):
def RunStep(self):
# Patch DEPS file.
if self.Command(
- "roll-dep", "v8 %s" % self["last_push"],
+ "roll-dep", "v8 %s" % self._options.roll,
cwd=self._options.chromium) is None:
- self.Die("Failed to create deps for %s" % self["last_push"])
+ self.Die("Failed to create deps for %s" % self._options.roll)
message = []
- message.append("Update V8 to %s." % self["push_title"].lower())
+ message.append("Update V8 to %s." % self["roll_title"].lower())
message.append(
- ROLL_SUMMARY % (self["last_rolled_base"][:8], self["push_base"][:8]))
+ ROLL_SUMMARY % (self._options.last_roll[:8], self._options.roll[:8]))
+
+ message.append(ISSUE_MSG)
- if self["sheriff"]:
- message.append("Please reply to the V8 sheriff %s in case of problems."
- % self["sheriff"])
message.append("TBR=%s" % self._options.reviewer)
self.GitCommit("\n\n".join(message),
author=self._options.author,
@@ -108,7 +102,7 @@ class UploadCL(Step):
print "CL uploaded."
else:
self.GitCheckout("master", cwd=self._options.chromium)
- self.GitDeleteBranch("v8-roll-%s" % self["last_push"],
+ self.GitDeleteBranch("v8-roll-%s" % self._options.roll,
cwd=self._options.chromium)
print "Dry run - don't upload."
@@ -127,8 +121,8 @@ class CleanUp(Step):
def RunStep(self):
print("Congratulations, you have successfully rolled %s into "
- "Chromium. Please don't forget to update the v8rel spreadsheet."
- % self["last_push"])
+ "Chromium."
+ % self._options.roll)
# Clean up all temporary files.
Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
@@ -139,10 +133,9 @@ class ChromiumRoll(ScriptsBase):
parser.add_argument("-c", "--chromium", required=True,
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
- parser.add_argument("-l", "--last-push",
- help="The git commit ID of the last candidates push.")
parser.add_argument("--last-roll", required=True,
help="The git commit ID of the last rolled version.")
+ parser.add_argument("roll", nargs=1, help="Revision to roll."),
parser.add_argument("--use-commit-queue",
help="Check the CQ bit on upload.",
default=False, action="store_true")
@@ -155,6 +148,7 @@ class ChromiumRoll(ScriptsBase):
options.requires_editor = False
options.force = True
options.manual = False
+ options.roll = options.roll[0]
return True
def _Config(self):
@@ -165,7 +159,7 @@ class ChromiumRoll(ScriptsBase):
def _Steps(self):
return [
Preparation,
- DetectLastPush,
+ PrepareRollCandidate,
DetermineV8Sheriff,
SwitchChromium,
UpdateChromiumCheckout,
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index bae05bc6b5..19841a34a6 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -46,9 +46,10 @@ from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
CHANGELOG_FILE = "ChangeLog"
+DAY_IN_SECONDS = 24 * 60 * 60
PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
-VERSION_FILE = os.path.join("src", "version.cc")
+VERSION_FILE = os.path.join("include", "v8-version.h")
VERSION_RE = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
# V8 base directory.
@@ -510,12 +511,12 @@ class Step(GitRecipesMixin):
answer = self.ReadLine(default="Y")
return answer == "" or answer == "Y" or answer == "y"
- def DeleteBranch(self, name):
- for line in self.GitBranch().splitlines():
+ def DeleteBranch(self, name, cwd=None):
+ for line in self.GitBranch(cwd=cwd).splitlines():
if re.match(r"\*?\s*%s$" % re.escape(name), line):
msg = "Branch %s exists, do you want to delete it?" % name
if self.Confirm(msg):
- self.GitDeleteBranch(name)
+ self.GitDeleteBranch(name, cwd=cwd)
print "Branch %s deleted." % name
else:
msg = "Can't continue. Please delete branch %s and try again." % name
@@ -537,8 +538,8 @@ class Step(GitRecipesMixin):
if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Persist current branch.
- self["current_branch"] = self.GitCurrentBranch()
+ # Checkout master in case the script was left on a work branch.
+ self.GitCheckout('origin/master')
# Fetch unfetched revisions.
self.vc.Fetch()
@@ -548,12 +549,8 @@ class Step(GitRecipesMixin):
self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
- if ' ' in self["current_branch"]:
- self.GitCheckout('master')
- else:
- self.GitCheckout(self["current_branch"])
- if self._config["BRANCHNAME"] != self["current_branch"]:
- self.GitDeleteBranch(self._config["BRANCHNAME"])
+ self.GitCheckout('origin/master')
+ self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
@@ -569,10 +566,10 @@ class Step(GitRecipesMixin):
value = match.group(1)
self["%s%s" % (prefix, var_name)] = value
for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
- for (var_name, def_name) in [("major", "MAJOR_VERSION"),
- ("minor", "MINOR_VERSION"),
- ("build", "BUILD_NUMBER"),
- ("patch", "PATCH_LEVEL")]:
+ for (var_name, def_name) in [("major", "V8_MAJOR_VERSION"),
+ ("minor", "V8_MINOR_VERSION"),
+ ("build", "V8_BUILD_NUMBER"),
+ ("patch", "V8_PATCH_LEVEL")]:
ReadAndPersist(var_name, def_name)
def WaitForLGTM(self):
@@ -701,16 +698,16 @@ class Step(GitRecipesMixin):
def SetVersion(self, version_file, prefix):
output = ""
for line in FileToText(version_file).splitlines():
- if line.startswith("#define MAJOR_VERSION"):
+ if line.startswith("#define V8_MAJOR_VERSION"):
line = re.sub("\d+$", self[prefix + "major"], line)
- elif line.startswith("#define MINOR_VERSION"):
+ elif line.startswith("#define V8_MINOR_VERSION"):
line = re.sub("\d+$", self[prefix + "minor"], line)
- elif line.startswith("#define BUILD_NUMBER"):
+ elif line.startswith("#define V8_BUILD_NUMBER"):
line = re.sub("\d+$", self[prefix + "build"], line)
- elif line.startswith("#define PATCH_LEVEL"):
+ elif line.startswith("#define V8_PATCH_LEVEL"):
line = re.sub("\d+$", self[prefix + "patch"], line)
elif (self[prefix + "candidate"] and
- line.startswith("#define IS_CANDIDATE_VERSION")):
+ line.startswith("#define V8_IS_CANDIDATE_VERSION")):
line = re.sub("\d+$", self[prefix + "candidate"], line)
output += "%s\n" % line
TextToFile(output, version_file)
@@ -753,16 +750,6 @@ class DetermineV8Sheriff(Step):
if not self._options.sheriff: # pragma: no cover
return
- try:
- # The googlers mapping maps @google.com accounts to @chromium.org
- # accounts.
- googlers = imp.load_source('googlers_mapping',
- self._options.googlers_mapping)
- googlers = googlers.list_to_dict(googlers.get_list())
- except: # pragma: no cover
- print "Skip determining sheriff without googler mapping."
- return
-
# The sheriff determined by the rotation on the waterfall has a
# @google.com account.
url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js"
@@ -771,9 +758,11 @@ class DetermineV8Sheriff(Step):
# If "channel is sheriff", we can't match an account.
if match:
g_name = match.group(1)
- self["sheriff"] = googlers.get(g_name + "@google.com",
- g_name + "@chromium.org")
- self._options.reviewer = self["sheriff"]
+      # Optimistically assume that the google and chromium account names
+      # are the same.
+ self["sheriff"] = g_name + "@chromium.org"
+ self._options.reviewer = ("%s,%s" %
+ (self["sheriff"], self._options.reviewer))
print "Found active sheriff: %s" % self["sheriff"]
else:
print "No active sheriff found."
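With the googlers mapping gone, the whole lookup reduces to one HTTP fetch and one regex. A sketch modeled on the test fixture's document.write('g_name') payload; the script's actual pattern may differ:

    import re
    import urllib2

    url = "https://chromium-build.appspot.com/p/chromium/sheriff_v8.js"
    match = re.match(r"document\.write\('(\w+)'\)", urllib2.urlopen(url).read())
    if match:
      # Optimistic equivalence of @google.com and @chromium.org accounts,
      # as the comment above spells out.
      print "Found active sheriff: %s" % (match.group(1) + "@chromium.org")
    else:
      print "No active sheriff found."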
@@ -825,8 +814,6 @@ class ScriptsBase(object):
help="The author email used for rietveld.")
parser.add_argument("--dry-run", default=False, action="store_true",
help="Perform only read-only actions.")
- parser.add_argument("-g", "--googlers-mapping",
- help="Path to the script mapping google accounts.")
parser.add_argument("-r", "--reviewer", default="",
help="The account name to be used for reviews.")
parser.add_argument("--sheriff", default=False, action="store_true",
@@ -851,10 +838,6 @@ class ScriptsBase(object):
print "Bad step number %d" % options.step
parser.print_help()
return None
- if options.sheriff and not options.googlers_mapping: # pragma: no cover
print "Determining the current sheriff requires the googler mapping"
- parser.print_help()
- return None
# Defaults for options, common to all scripts.
options.manual = getattr(options, "manual", True)
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index 44c10d9b30..3bbb50e491 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -30,23 +30,10 @@ class PrepareBranchRevision(Step):
MESSAGE = "Check from which revision to branch off."
def RunStep(self):
- if self._options.revision:
- self["push_hash"], tree_object = self.GitLog(
- n=1, format="\"%H %T\"", git_hash=self._options.revision).split(" ")
- else:
- self["push_hash"], tree_object = self.GitLog(
- n=1, format="\"%H %T\"", branch="origin/master").split(" ")
- print "Release revision %s" % self["push_hash"]
+ self["push_hash"] = (self._options.revision or
+ self.GitLog(n=1, format="%H", branch="origin/master"))
assert self["push_hash"]
-
- pending_tuples = self.GitLog(
- n=200, format="\"%H %T\"", branch="refs/pending/heads/master")
- for hsh, tree in map(lambda s: s.split(" "), pending_tuples.splitlines()):
- if tree == tree_object:
- self["pending_hash"] = hsh
- break
- print "Pending release revision %s" % self["pending_hash"]
- assert self["pending_hash"]
+ print "Release revision %s" % self["push_hash"]
class IncrementVersion(Step):
@@ -174,7 +161,7 @@ class MakeBranch(Step):
def RunStep(self):
self.Git("reset --hard origin/master")
- self.Git("checkout -b work-branch %s" % self["pending_hash"])
+ self.Git("checkout -b work-branch %s" % self["push_hash"])
self.GitCheckoutFile(CHANGELOG_FILE, self["latest_version"])
self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
@@ -229,8 +216,7 @@ class PushBranch(Step):
def RunStep(self):
pushspecs = [
"refs/heads/work-branch:refs/pending/heads/%s" % self["version"],
- "%s:refs/pending-tags/heads/%s" %
- (self["pending_hash"], self["version"]),
+ "%s:refs/pending-tags/heads/%s" % (self["push_hash"], self["version"]),
"%s:refs/heads/%s" % (self["push_hash"], self["version"]),
]
cmd = "push origin %s" % " ".join(pushspecs)
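With pending_hash gone, a single hash feeds the work branch, the pending-tags ref, and the version branch alike. A condensed, runnable sketch under that assumption; git_log, git, options, and version are hypothetical stand-ins for the step's own helpers and state:

    def git_log(**kwargs): return "push_hash"   # stand-in
    def git(cmd): print "git %s" % cmd          # stand-in
    class options: revision = None              # stand-in
    version = "3.22.5"                          # stand-in

    push_hash = options.revision or git_log(n=1, format="%H",
                                            branch="origin/master")
    git("checkout -b work-branch %s" % push_hash)
    pushspecs = [
        "refs/heads/work-branch:refs/pending/heads/%s" % version,
        "%s:refs/pending-tags/heads/%s" % (push_hash, version),
        "%s:refs/heads/%s" % (push_hash, version),
    ]
    git("push origin %s" % " ".join(pushspecs))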
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index 7aa9fb6ab6..5fe3ba4251 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -169,12 +169,12 @@ class IncrementVersion(Step):
if self._options.revert_master:
return
new_patch = str(int(self["patch"]) + 1)
- if self.Confirm("Automatically increment PATCH_LEVEL? (Saying 'n' will "
+ if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
"fire up your EDITOR on %s so you can make arbitrary "
"changes. When you're done, save the file and exit your "
"EDITOR.)" % VERSION_FILE):
text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
- text = MSub(r"(?<=#define PATCH_LEVEL)(?P<space>\s+)\d*$",
+ text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
r"\g<space>%s" % new_patch,
text)
TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
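The MSub pattern pairs a fixed-width lookbehind with a named whitespace group, so only the digits change and the original spacing is preserved. A worked example with plain re.sub (MSub itself is the scripts' wrapper):

    import re
    print re.sub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
                 r"\g<space>4",
                 "#define V8_PATCH_LEVEL 3")
    # -> #define V8_PATCH_LEVEL 4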
diff --git a/deps/v8/tools/release/releases.py b/deps/v8/tools/release/releases.py
index 0f35e7c88f..9ff6e9b8f6 100755
--- a/deps/v8/tools/release/releases.py
+++ b/deps/v8/tools/release/releases.py
@@ -294,7 +294,7 @@ class RetrieveV8Releases(Step):
releases = []
if self._options.branch == 'recent':
# List every release from the last 7 days.
- revisions = self.GetRecentReleases(max_age=7 * 24 * 60 * 60)
+ revisions = self.GetRecentReleases(max_age=7 * DAY_IN_SECONDS)
for revision in revisions:
releases += self.GetReleaseFromRevision(revision)
elif self._options.branch == 'all': # pragma: no cover
@@ -334,6 +334,7 @@ class UpdateChromiumCheckout(Step):
cwd = self._options.chromium
self.GitCheckout("master", cwd=cwd)
self.GitPull(cwd=cwd)
+ self.DeleteBranch(self.Config("BRANCHNAME"), cwd=cwd)
self.GitCreateBranch(self.Config("BRANCHNAME"), cwd=cwd)
@@ -488,6 +489,7 @@ class Releases(ScriptsBase):
parser.add_argument("--json", help="Path to a JSON file for export.")
def _ProcessOptions(self, options): # pragma: no cover
+ options.force_readline_defaults = True
return True
def _Config(self):
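Two small hardening changes land here: a named DAY_IN_SECONDS constant replaces the inline arithmetic, and the Chromium checkout step deletes a stale scratch branch before recreating it, so a crashed previous run no longer blocks the next one. A sketch of the combined step; the free-function form is illustrative:

    DAY_IN_SECONDS = 24 * 60 * 60  # 86400; 'recent' means the last 7 days

    def UpdateChromiumCheckout(step, cwd):
      step.GitCheckout("master", cwd=cwd)
      step.GitPull(cwd=cwd)
      step.DeleteBranch(step.Config("BRANCHNAME"), cwd=cwd)  # idempotent cleanup
      step.GitCreateBranch(step.Config("BRANCHNAME"), cwd=cwd)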
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index 3beddfd936..291ca38123 100644
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -64,7 +64,6 @@ TEST_CONFIG = {
"ALREADY_MERGING_SENTINEL_FILE":
"/tmp/test-merge-to-branch-tempfile-already-merging",
"TEMPORARY_PATCH_FILE": "/tmp/test-merge-to-branch-tempfile-temporary-patch",
- "CLUSTERFUZZ_API_KEY_FILE": "/tmp/test-fake-cf-api-key",
}
@@ -361,12 +360,12 @@ class ScriptTest(unittest.TestCase):
with open(version_file, "w") as f:
f.write(" // Some line...\n")
f.write("\n")
- f.write("#define MAJOR_VERSION %s\n" % major)
- f.write("#define MINOR_VERSION %s\n" % minor)
- f.write("#define BUILD_NUMBER %s\n" % build)
- f.write("#define PATCH_LEVEL %s\n" % patch)
+ f.write("#define V8_MAJOR_VERSION %s\n" % major)
+ f.write("#define V8_MINOR_VERSION %s\n" % minor)
+ f.write("#define V8_BUILD_NUMBER %s\n" % build)
+ f.write("#define V8_PATCH_LEVEL %s\n" % patch)
f.write(" // Some line...\n")
- f.write("#define IS_CANDIDATE_VERSION 0\n")
+ f.write("#define V8_IS_CANDIDATE_VERSION 0\n")
def MakeStep(self):
"""Convenience wrapper."""
@@ -397,11 +396,6 @@ class ScriptTest(unittest.TestCase):
else:
return self._mock.Call("readurl", url)
- def ReadClusterFuzzAPI(self, api_key, **params):
- # TODO(machenbach): Use a mock for this and add a test that stops rolling
- # due to clusterfuzz results.
- return []
-
def Sleep(self, seconds):
pass
@@ -443,7 +437,7 @@ class ScriptTest(unittest.TestCase):
def testCommonPrepareDefault(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
@@ -451,24 +445,22 @@ class ScriptTest(unittest.TestCase):
])
self.MakeStep().CommonPrepare()
self.MakeStep().PrepareBranch()
- self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareNoConfirm(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("n"),
])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
- self.assertEquals("some_branch", self._state["current_branch"])
def testCommonPrepareDeleteBranchFailure(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
@@ -476,7 +468,6 @@ class ScriptTest(unittest.TestCase):
])
self.MakeStep().CommonPrepare()
self.assertRaises(Exception, self.MakeStep().PrepareBranch)
- self.assertEquals("some_branch", self._state["current_branch"])
def testInitialEnvironmentChecks(self):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
@@ -528,10 +519,10 @@ class ScriptTest(unittest.TestCase):
" too much\n"
" trailing", cl)
- self.assertEqual("//\n#define BUILD_NUMBER 3\n",
- MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
+ self.assertEqual("//\n#define V8_BUILD_NUMBER 3\n",
+ MSub(r"(?<=#define V8_BUILD_NUMBER)(?P<space>\s+)\d*$",
r"\g<space>3",
- "//\n#define BUILD_NUMBER 321\n"))
+ "//\n#define V8_BUILD_NUMBER 321\n"))
def testPreparePushRevision(self):
# Tests the default push hash used when the --revision option is not set.
@@ -629,7 +620,7 @@ test_tag
self.Expect([
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
- Cmd("git checkout -f origin/master -- src/version.cc",
+ Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=lambda: self.WriteFakeVersionFile(3, 22, 6)),
])
@@ -750,11 +741,12 @@ Performance and stability improvements on all platforms."""
self.assertEquals(commit_msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
- self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
- self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
- self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
- self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
- self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+ self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
+ self.assertFalse(re.search(r"#define V8_BUILD_NUMBER\s+6", version))
+ self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+0", version))
+ self.assertTrue(
+ re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
# Check that the change log on the candidates branch got correctly
# modified.
@@ -779,7 +771,7 @@ Performance and stability improvements on all platforms."""
expectations.append(Cmd("which vi", "/usr/bin/vi"))
expectations += [
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git branch", " branch1\n* branch2\n"),
@@ -787,7 +779,7 @@ Performance and stability improvements on all platforms."""
TEST_CONFIG["BRANCHNAME"]), ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
- Cmd("git checkout -f origin/master -- src/version.cc",
+ Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=self.WriteFakeVersionFile),
Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
Cmd("git log -1 --format=%s release_hash",
@@ -811,7 +803,7 @@ Performance and stability improvements on all platforms."""
Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
Cmd("git checkout -f origin/candidates -- ChangeLog", "",
cb=ResetChangeLog),
- Cmd("git checkout -f origin/candidates -- src/version.cc", "",
+ Cmd("git checkout -f origin/candidates -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
Cmd("git commit -am \"%s\"" % commit_msg_squashed, ""),
]
@@ -833,7 +825,7 @@ Performance and stability improvements on all platforms."""
" origin/candidates", "hsh_to_tag"),
Cmd("git tag 3.22.5 hsh_to_tag", ""),
Cmd("git push origin 3.22.5", ""),
- Cmd("git checkout -f some_branch", ""),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
Cmd("git branch -D %s" % TEST_CONFIG["CANDIDATESBRANCH"], ""),
]
@@ -891,11 +883,12 @@ Performance and stability improvements on all platforms."""
self.assertEquals(commit_msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
- self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
- self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
- self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
- self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
- self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+ self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
+ self.assertFalse(re.search(r"#define V8_BUILD_NUMBER\s+6", version))
+ self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+0", version))
+ self.assertTrue(
+ re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
# Check that the change log on the candidates branch got correctly
# modified.
@@ -921,12 +914,9 @@ Performance and stability improvements on all platforms."""
"+refs/pending-tags/*:refs/pending-tags/*", ""),
Cmd("git checkout -f origin/master", ""),
Cmd("git branch", ""),
- Cmd("git log -1 --format=\"%H %T\" push_hash", "push_hash tree_hash"),
- Cmd("git log -200 --format=\"%H %T\" refs/pending/heads/master",
- "not_right wrong\npending_hash tree_hash\nsome other\n"),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
- Cmd("git checkout -f origin/master -- src/version.cc",
+ Cmd("git checkout -f origin/master -- include/v8-version.h",
"", cb=self.WriteFakeVersionFile),
Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
Cmd("git log -1 --format=%s release_hash", "Version 3.22.4\n"),
@@ -936,15 +926,15 @@ Performance and stability improvements on all platforms."""
Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
Cmd("git reset --hard origin/master", ""),
- Cmd("git checkout -b work-branch pending_hash", ""),
+ Cmd("git checkout -b work-branch push_hash", ""),
Cmd("git checkout -f 3.22.4 -- ChangeLog", "", cb=ResetChangeLog),
- Cmd("git checkout -f 3.22.4 -- src/version.cc", "",
+ Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
cb=CheckVersionCommit),
Cmd("git push origin "
"refs/heads/work-branch:refs/pending/heads/3.22.5 "
- "pending_hash:refs/pending-tags/heads/3.22.5 "
+ "push_hash:refs/pending-tags/heads/3.22.5 "
"push_hash:refs/heads/3.22.5", ""),
Cmd("git fetch", ""),
Cmd("git log -1 --format=%H --grep="
@@ -989,15 +979,17 @@ git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
"""
- def testChromiumRoll(self):
- googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG["PERSISTFILE_BASENAME"]
- with open(googlers_mapping_py, "w") as f:
- f.write("""
-def list_to_dict(entries):
- return {"g_name@google.com": "c_name@chromium.org"}
-def get_list():
- pass""")
+ ROLL_COMMIT_MSG = """Update V8 to version 3.22.4 (based on abc).
+
+Summary of changes available at:
+https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
+
+Please follow these instructions for assigning/CC'ing issues:
+https://code.google.com/p/v8-wiki/wiki/TriagingIssues
+TBR=g_name@chromium.org,reviewer@chromium.org"""
+
+ def testChromiumRoll(self):
# Setup fake directory structures.
TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
@@ -1014,18 +1006,10 @@ def get_list():
expectations = [
Cmd("git fetch origin", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git tag", self.TAGS),
- Cmd("git log -1 --format=%H 3.22.4", "push_hash\n"),
- Cmd("git log -1 --format=%s push_hash",
+ Cmd("git log -1 --format=%s roll_hsh",
"Version 3.22.4 (based on abc)\n"),
- Cmd("git log -1 --format=%H 3.22.4", "push_hash\n"),
- Cmd("git log -1 --format=%s push_hash",
- "Version 3.22.4 (based on abc)"),
+ Cmd("git describe --tags roll_hsh", "3.22.4"),
Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
- Cmd("git log -1 --format=%H 3.22.2", "last_roll_base_hash"),
- Cmd("git log -1 --format=%s last_roll_base_hash", "Version 3.22.2"),
- Cmd("git log -1 --format=%H last_roll_base_hash^",
- "last_roll_master_hash"),
URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
"document.write('g_name')"),
Cmd("git status -s -uno", "", cwd=chrome_dir),
@@ -1033,15 +1017,11 @@ def get_list():
Cmd("gclient sync --nohooks", "syncing...", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
Cmd("git fetch origin", ""),
- Cmd("git new-branch v8-roll-push_hash", "", cwd=chrome_dir),
- Cmd("roll-dep v8 push_hash", "rolled", cb=WriteDeps, cwd=chrome_dir),
- Cmd(("git commit -am \"Update V8 to version 3.22.4 "
- "(based on abc).\n\n"
- "Summary of changes available at:\n"
- "https://chromium.googlesource.com/v8/v8/+log/last_rol..abc\n\n"
- "Please reply to the V8 sheriff c_name@chromium.org in "
- "case of problems.\n\nTBR=c_name@chromium.org\" "
- "--author \"author@chromium.org <author@chromium.org>\""),
+ Cmd("git new-branch v8-roll-roll_hsh", "", cwd=chrome_dir),
+ Cmd("roll-dep v8 roll_hsh", "rolled", cb=WriteDeps, cwd=chrome_dir),
+ Cmd(("git commit -am \"%s\" "
+ "--author \"author@chromium.org <author@chromium.org>\"" %
+ self.ROLL_COMMIT_MSG),
"", cwd=chrome_dir),
Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f", "",
cwd=chrome_dir),
@@ -1049,9 +1029,10 @@ def get_list():
self.Expect(expectations)
args = ["-a", "author@chromium.org", "-c", chrome_dir,
- "--sheriff", "--googlers-mapping", googlers_mapping_py,
+ "--sheriff",
"-r", "reviewer@chromium.org",
- "--last-roll", "last_roll_hsh"]
+ "--last-roll", "last_roll_hsh",
+ "roll_hsh"]
ChromiumRoll(TEST_CONFIG, self).Run(args)
deps = FileToText(os.path.join(chrome_dir, "DEPS"))
@@ -1072,11 +1053,7 @@ def get_list():
auto_push.AutoPush, LastReleaseBailout, AUTO_PUSH_ARGS))
def testAutoPush(self):
- TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
-
self.Expect([
- Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
Cmd("git fetch", ""),
Cmd("git fetch origin +refs/heads/roll:refs/heads/roll", ""),
Cmd("git show-ref -s refs/heads/roll", "abc123\n"),
@@ -1127,8 +1104,14 @@ deps = {
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"}]}")),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git tag", self.TAGS),
- Cmd("git log -1 --format=%H 3.22.4", "push_hash\n"),
+ Cmd("git rev-list --max-age=740800 --tags",
+ "bad_tag\nhash_234\nhash_123"),
+ Cmd("git describe --tags bad_tag", ""),
+ Cmd("git describe --tags hash_234", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git describe --tags abcd123455", "3.22.4"),
+ Cmd("git describe --tags hash_234", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
@@ -1138,16 +1121,19 @@ deps = {
def testAutoRoll(self):
TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
- TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"] = self.MakeEmptyTempFile()
- TextToFile("fake key", TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"])
self.Expect([
URL("https://codereview.chromium.org/search",
"owner=author%40chromium.org&limit=30&closed=3&format=json",
("{\"results\": [{\"subject\": \"different\"}]}")),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
- Cmd("git tag", self.TAGS),
- Cmd("git log -1 --format=%H 3.22.4", "push_hash\n"),
+ Cmd("git rev-list --max-age=740800 --tags",
+ "bad_tag\nhash_234\nhash_123"),
+ Cmd("git describe --tags bad_tag", ""),
+ Cmd("git describe --tags hash_234", "3.22.4"),
+ Cmd("git describe --tags hash_123", "3.22.3"),
+ Cmd("git describe --tags abcd123455", "3.22.3.1"),
+ Cmd("git describe --tags hash_234", "3.22.4"),
])
result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
@@ -1192,14 +1178,15 @@ LOG=N
self.assertEquals(msg, commit)
version = FileToText(
os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
- self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
- self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
- self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
- self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
+ self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
+ self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
+ self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+1", version))
+ self.assertTrue(
+ re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
@@ -1273,7 +1260,7 @@ LOG=N
"hsh_to_tag"),
Cmd("git tag 3.22.5.1 hsh_to_tag", ""),
Cmd("git push origin 3.22.5.1", ""),
- Cmd("git checkout -f some_branch", ""),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
@@ -1360,7 +1347,7 @@ Cr-Commit-Position: refs/heads/4.2.71@{#1}
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git status -s -b -uno", "## some_branch\n"),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
@@ -1419,6 +1406,10 @@ Cr-Commit-Position: refs/heads/4.2.71@{#1}
Cmd("git status -s -uno", "", cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git pull", "", cwd=chrome_dir),
+ Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"],
+ cwd=chrome_dir),
+ Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "",
+ cwd=chrome_dir),
Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], "",
cwd=chrome_dir),
Cmd("git fetch origin", "", cwd=chrome_v8_dir),
@@ -1453,7 +1444,7 @@ Cr-Commit-Position: refs/heads/4.2.71@{#1}
cwd=chrome_dir),
Cmd("git checkout -f master", "", cwd=chrome_dir),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
- Cmd("git checkout -f some_branch", ""),
+ Cmd("git checkout -f origin/master", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
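All of these tests drive the scripts against a FIFO of expected commands and canned outputs, which is why every behavioral change above shows up as a one-line swap in an Expect list. A minimal sketch of the idea; the real Cmd/Expect helpers also carry cwd and callbacks:

    class CommandMock(object):
      """Queue of (expected_command, canned_output) pairs."""
      def __init__(self, expectations):
        self._expectations = list(expectations)

      def Call(self, command):
        expected, output = self._expectations.pop(0)
        assert command == expected, "got %r, expected %r" % (command, expected)
        return output

    mock = CommandMock([("git fetch", ""), ("git branch", "* work-branch")])
    mock.Call("git fetch")         # matches, returns ""
    print mock.Call("git branch")  # -> * work-branch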
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 8627319359..f7e77ca154 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -130,6 +130,7 @@ GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
SUPPORTED_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
+ "android_x64",
"arm",
"ia32",
"x87",
@@ -147,6 +148,7 @@ SUPPORTED_ARCHS = ["android_arm",
SLOW_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
+ "android_x64",
"arm",
"mips",
"mipsel",
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 63c9148515..f7710796bc 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -111,6 +111,7 @@ ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
+ "android_x64",
"arm",
"ia32",
"mips",
@@ -533,7 +534,8 @@ class AndroidPlatform(Platform): # pragma: no cover
logging.info("adb -s %s %s" % (str(self.device), cmd))
return self.adb.SendCommand(cmd, timeout_time=60)
- def _PushFile(self, host_dir, file_name, target_rel="."):
+ def _PushFile(self, host_dir, file_name, target_rel=".",
+ skip_if_missing=False):
file_on_host = os.path.join(host_dir, file_name)
file_on_device_tmp = os.path.join(
AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
@@ -541,6 +543,12 @@ class AndroidPlatform(Platform): # pragma: no cover
AndroidPlatform.DEVICE_DIR, target_rel, file_name)
folder_on_device = os.path.dirname(file_on_device)
+ # Only attempt to push files that exist.
+ if not os.path.exists(file_on_host):
+ if not skip_if_missing:
+ logging.critical('Missing file on host: %s' % file_on_host)
+ return
+
# Only push files not yet pushed in one execution.
if file_on_host in self.pushed:
return
@@ -568,6 +576,12 @@ class AndroidPlatform(Platform): # pragma: no cover
bench_abs = suite_dir
self._PushFile(self.shell_dir, node.binary)
+
+ # Push external startup data. This is backwards compatible with
+ # revisions where these files didn't exist.
+ self._PushFile(self.shell_dir, "natives_blob.bin", skip_if_missing=True)
+ self._PushFile(self.shell_dir, "snapshot_blob.bin", skip_if_missing=True)
+
if isinstance(node, Runnable):
self._PushFile(bench_abs, node.main, bench_rel)
for resource in node.resources:
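The push helper now tolerates optional files, which keeps older revisions that predate the external snapshot blobs benchmarkable. A condensed sketch of just the guard added above:

    import logging
    import os

    def ShouldPush(file_on_host, skip_if_missing=False):
      # Mirrors the early-out above: optional files vanish silently,
      # required ones log before the push is skipped.
      if not os.path.exists(file_on_host):
        if not skip_if_missing:
          logging.critical('Missing file on host: %s' % file_on_host)
        return False
      return True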
diff --git a/deps/v8/tools/test-push-to-trunk.sh b/deps/v8/tools/test-push-to-trunk.sh
deleted file mode 100755
index 6c201e4628..0000000000
--- a/deps/v8/tools/test-push-to-trunk.sh
+++ /dev/null
@@ -1,246 +0,0 @@
-#!/bin/bash
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Tests the push-to-trunk.sh script. Needs to be run in V8 base dir:
-# ./tools/test-push-to-trunk.sh
-
-# TODO(machenbach): Check automatically if expectations match.
-# TODO(machenbach): Mock out version number retrieval.
-# TODO(machenbach): Allow multiple different test cases.
-# TODO(machenbach): Allow multi line mock output.
-# TODO(machenbach): Represent test expectations/mock output without an array
-# index increment.
-
-########## Stdin for push-to-trunk.sh
-
-# Confirm push to trunk commit ID
-INPUT[0]="Y"
-# Open editor
-INPUT[1]=""
-# Confirm increment version number
-INPUT[2]="Y"
-# Reviewer for V8 CL
-INPUT[3]="reviewer@chromium.org"
-# Enter LGTM for V8 CL
-INPUT[4]="LGTM"
-# Confirm checkout sanity
-INPUT[5]="Y"
-# Manually type in trunk revision
-INPUT[6]="12345"
-# Reviewer for Chromium CL
-INPUT[7]="reviewer@chromium.org"
-
-########## Expected commands and mock output
-
-EXP[0]="git status -s -uno"
-OUT[0]=""
-EXP[1]="git status -s -b -uno"
-OUT[1]="## some_branch"
-EXP[2]="git svn fetch"
-OUT[2]=""
-EXP[3]="git branch"
-OUT[3]="not the temp branch"
-EXP[4]="git checkout -b prepare-push-temporary-branch-created-by-script"
-OUT[4]=""
-EXP[5]="git branch"
-OUT[5]="not the branch"
-EXP[6]="git branch"
-OUT[6]="not the trunk branch"
-EXP[7]="git checkout -b prepare-push svn/bleeding_edge"
-OUT[7]=""
-EXP[8]="git log -1 --format=%H ChangeLog"
-OUT[8]="hash1"
-EXP[9]="git log -1 hash1"
-OUT[9]=""
-EXP[10]="git log hash1..HEAD --format=%H"
-OUT[10]="hash2"
-EXP[11]="git log -1 hash2 --format=\"%w(80,8,8)%s\""
-OUT[11]="Log line..."
-EXP[12]="git log -1 hash2 --format=\"%B\""
-OUT[12]="BUG=6789"
-EXP[13]="git log -1 hash2 --format=\"%w(80,8,8)(%an)\""
-OUT[13]=" (author@chromium.org)"
-EXP[14]="git commit -a -m \"Prepare push to trunk. Now working on version 3.4.5.\""
-OUT[14]=""
-EXP[15]="git cl upload -r reviewer@chromium.org --send-mail"
-OUT[15]=""
-EXP[16]="git cl dcommit"
-OUT[16]=""
-EXP[17]="git svn fetch"
-OUT[17]=""
-EXP[18]="git checkout svn/bleeding_edge"
-OUT[18]=""
-EXP[19]="git log -1 --format=%H --grep=Prepare push to trunk. Now working on version 3.4.5."
-OUT[19]="hash3"
-EXP[20]="git diff svn/trunk"
-OUT[20]="patch1"
-EXP[21]="git checkout -b trunk-push svn/trunk"
-OUT[21]=""
-EXP[22]="git apply --index --reject /tmp/v8-push-to-trunk-tempfile-patch"
-OUT[22]=""
-EXP[23]="git add src/version.cc"
-OUT[23]=""
-EXP[24]="git commit -F /tmp/v8-push-to-trunk-tempfile-commitmsg"
-OUT[24]=""
-EXP[25]="git svn dcommit"
-OUT[25]="r1234"
-EXP[26]="git svn tag 3.4.5 -m \"Tagging version 3.4.5\""
-OUT[26]=""
-EXP[27]="git status -s -uno"
-OUT[27]=""
-EXP[28]="git checkout master"
-OUT[28]=""
-EXP[29]="git pull"
-OUT[29]=""
-EXP[30]="git checkout -b v8-roll-12345"
-OUT[30]=""
-EXP[31]="git commit -am Update V8 to version 3.4.5."
-OUT[31]=""
-EXP[32]="git cl upload --send-mail"
-OUT[32]=""
-EXP[33]="git checkout -f some_branch"
-OUT[33]=""
-EXP[34]="git branch -D prepare-push-temporary-branch-created-by-script"
-OUT[34]=""
-EXP[35]="git branch -D prepare-push"
-OUT[35]=""
-EXP[36]="git branch -D trunk-push"
-OUT[36]=""
-
-########## Global temp files for test input/output
-
-export TEST_OUTPUT=$(mktemp)
-export INDEX=$(mktemp)
-export MOCK_OUTPUT=$(mktemp)
-export EXPECTED_COMMANDS=$(mktemp)
-
-########## Command index
-
-inc_index() {
- local I="$(command cat $INDEX)"
- let "I+=1"
- echo "$I" > $INDEX
- echo $I
-}
-
-echo "-1" > $INDEX
-export -f inc_index
-
-########## Mock output accessor
-
-get_mock_output() {
- local I=$1
- let "I+=1"
- command sed "${I}q;d" $MOCK_OUTPUT
-}
-
-export -f get_mock_output
-
-for E in "${OUT[@]}"; do
- echo $E
-done > $MOCK_OUTPUT
-
-########## Expected commands accessor
-
-get_expected_command() {
- local I=$1
- let "I+=1"
- command sed "${I}q;d" $EXPECTED_COMMANDS
-}
-
-export -f get_expected_command
-
-for E in "${EXP[@]}"; do
- echo $E
-done > $EXPECTED_COMMANDS
-
-########## Mock commands
-
-git() {
- # All calls to git are mocked out. Expected calls and mock output are stored
- # in the EXP/OUT arrays above.
- local I=$(inc_index)
- local OUT=$(get_mock_output $I)
- local EXP=$(get_expected_command $I)
- echo "#############################" >> $TEST_OUTPUT
- echo "Com. Index: $I" >> $TEST_OUTPUT
- echo "Expected: ${EXP}" >> $TEST_OUTPUT
- echo "Actual: git $@" >> $TEST_OUTPUT
- echo "Mock Output: ${OUT}" >> $TEST_OUTPUT
- echo "${OUT}"
-}
-
-mv() {
- echo "#############################" >> $TEST_OUTPUT
- echo "mv $@" >> $TEST_OUTPUT
-}
-
-sed() {
- # Only calls to sed * -i * are mocked out.
- echo "#############################" >> $TEST_OUTPUT
- local arr=$@
- if [[ "${arr[@]}" =~ "-i" || "${arr[${#arr[@]}-1]}" == "-i" ]]; then
- echo "sed $@" >> $TEST_OUTPUT
- else
- echo "sed $@" >> $TEST_OUTPUT
- command sed "$@"
- fi
-}
-
-editor() {
- echo "#############################" >> $TEST_OUTPUT
- echo "editor $@" >> $TEST_OUTPUT
-}
-
-cd() {
- echo "#############################" >> $TEST_OUTPUT
- echo "cd $@" >> $TEST_OUTPUT
-}
-
-export -f git
-export -f mv
-export -f sed
-export -f cd
-export -f editor
-export EDITOR=editor
-
-########## Invoke script with test stdin
-
-for i in "${INPUT[@]}"; do
- echo $i
-done | tools/push-to-trunk.sh -c "path/to/chromium"
-
-echo "Collected output:"
-command cat $TEST_OUTPUT
-
-########## Clean up
-
-rm -rf $TEST_OUTPUT
-rm -rf $INDEX
-rm -rf $MOCK_OUTPUT
-rm -rf $EXPECTED_COMMANDS
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index a52fa566b8..d8c15493b4 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -55,8 +55,9 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little",
"android_arm", "android_arm64", "android_ia32", "android_x87",
- "arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
- "nacl_x64", "ppc", "ppc64", "macos", "windows", "linux", "aix"]:
+ "android_x64", "arm", "arm64", "ia32", "mips", "mipsel",
+ "mips64el", "x64", "x87", "nacl_ia32", "nacl_x64", "ppc", "ppc64",
+ "macos", "windows", "linux", "aix"]:
VARIABLES[var] = var
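Each name maps to itself so that a status-file guard evaluates as a plain string comparison once the current configuration is bound in. A hedged illustration; the test runner's own eval plumbing differs in detail:

    VARIABLES = {"ALWAYS": True}
    for var in ["android_x64", "arm", "ia32", "x64"]:
      VARIABLES[var] = var

    env = dict(VARIABLES, arch="android_x64")
    print eval("arch == android_x64", env)  # True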
diff --git a/deps/v8/tools/v8-info.sh b/deps/v8/tools/v8-info.sh
index 1f25d147a5..838d92a001 100755
--- a/deps/v8/tools/v8-info.sh
+++ b/deps/v8/tools/v8-info.sh
@@ -30,11 +30,11 @@
########## Global variable definitions
BASE_URL="https://code.google.com/p/v8/source/list"
-VERSION="src/version.cc"
-MAJOR="MAJOR_VERSION"
-MINOR="MINOR_VERSION"
-BUILD="BUILD_NUMBER"
-PATCH="PATCH_LEVEL"
+VERSION="include/v8-version.h"
+MAJOR="V8_MAJOR_VERSION"
+MINOR="V8_MINOR_VERSION"
+BUILD="V8_BUILD_NUMBER"
+PATCH="V8_PATCH_LEVEL"
V8="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"